SWMAL Exercise 1 Intro¶
#comment
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "fundamentals"
def save_fig(fig_id, tight_layout=True):
    """Stub for saving a figure as images/<CHAPTER_ID>/<fig_id>.png.

    Saving is deliberately disabled in this notebook: the function only
    builds the target path and prints a notice. Re-enable by commenting
    the savefig lines back in.

    Args:
        fig_id: base file name (without extension) for the figure.
        tight_layout: would apply plt.tight_layout() before saving if
            saving were enabled; currently unused.
    """
    # path is computed but unused while saving is disabled.
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    print("IGNORING: Saving figure", fig_id) # SWMAL: I've disabled saving of figures
    #if tight_layout:
    #    plt.tight_layout()
    #plt.savefig(path, format='png', dpi=300)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
print("OK")
OK
def prepare_country_stats(oecd_bli, gdp_per_capita):
    """Merge OECD Better-Life data with GDP per capita into one frame.

    Keeps only the 'TOT' inequality rows, pivots the BLI table so each
    indicator becomes a column, joins on country, sorts by GDP per capita
    and drops a fixed set of rows (the HOML book example).

    NOTE: mutates ``gdp_per_capita`` in place (rename + set_index), so
    the caller's frame is changed as a side effect.

    Args:
        oecd_bli: raw OECD BLI frame with INEQUALITY/Country/Indicator/Value columns.
        gdp_per_capita: frame with a 'Country' column and a '2015' GDP column.

    Returns:
        DataFrame indexed by country with the 'GDP per capita' and
        'Life satisfaction' columns, with the fixed rows removed.
    """
    oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"]
    oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value")
    gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
    gdp_per_capita.set_index("Country", inplace=True)
    full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,
                                  left_index=True, right_index=True)
    full_country_stats.sort_values(by="GDP per capita", inplace=True)
    # Hard-coded indices assume exactly 36 merged countries (book example)
    remove_indices = [0, 1, 6, 8, 33, 34, 35]
    keep_indices = list(set(range(36)) - set(remove_indices))
    return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices]
print("OK")
OK
import os
datapath = os.path.join("../datasets", "lifesat", "")
# NOTE: a ! prefix makes us able to run system commands..
# (command 'dir' for windows, 'ls' for Linux or Macs)
#
! dir
! ls
print("\nOK")
/bin/bash: dir: command not found Figs demo.ipynb modules_and_classes.ipynb datasets intro.ipynb datasets.zip lesson01.pdf OK
# Code example
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn.linear_model
# Load the data
try:
oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',')
gdp_per_capita = pd.read_csv(datapath + "gdp_per_capita.csv",thousands=',',delimiter='\t',
encoding='latin1', na_values="n/a")
except Exception as e:
print(f"SWMAL NOTE: well, you need to have the 'datasets' dir in path, please unzip 'datasets.zip' and make sure that its included in the datapath='{datapath}' setting in the cell above..")
raise e
# Prepare the data
country_stats = prepare_country_stats(oecd_bli, gdp_per_capita)
X = np.c_[country_stats["GDP per capita"]]
y = np.c_[country_stats["Life satisfaction"]]
# Visualize the data
country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction')
plt.show()
# Select a linear model
model = sklearn.linear_model.LinearRegression()
# Train the model
model.fit(X, y)
# Make a prediction for Cyprus
X_new = [[22587]] # Cyprus' GDP per capita
y_pred = model.predict(X_new)
print(y_pred) # outputs [[ 5.96242338]]
print("OK")
[[5.96242338]] OK
oecd_bli = pd.read_csv(datapath + "oecd_bli_2015.csv", thousands=',')
oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"]
oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value")
#oecd_bli.head(2)
gdp_per_capita = pd.read_csv(datapath+"gdp_per_capita.csv", thousands=',', delimiter='\t',
encoding='latin1', na_values="n/a")
gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
gdp_per_capita.set_index("Country", inplace=True)
#gdp_per_capita.head(2)
full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita, left_index=True, right_index=True)
full_country_stats.sort_values(by="GDP per capita", inplace=True)
#full_country_stats
remove_indices = [0, 1, 6, 8, 33, 34, 35]
keep_indices = list(set(range(36)) - set(remove_indices))
sample_data = full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices]
#missing_data = full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[remove_indices]
sample_data.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(5,3))
plt.axis([0, 60000, 0, 10])
position_text = {
"Hungary": (5000, 1),
"Korea": (18000, 1.7),
"France": (29000, 2.4),
"Australia": (40000, 3.0),
"United States": (52000, 3.8),
}
for country, pos_text in position_text.items():
pos_data_x, pos_data_y = sample_data.loc[country]
country = "U.S." if country == "United States" else country
plt.annotate(country, xy=(pos_data_x, pos_data_y), xytext=pos_text,
arrowprops=dict(facecolor='black', width=0.5, shrink=0.1, headwidth=5))
plt.plot(pos_data_x, pos_data_y, "ro")
#save_fig('money_happy_scatterplot')
plt.show()
from sklearn import linear_model
lin1 = linear_model.LinearRegression()
Xsample = np.c_[sample_data["GDP per capita"]]
ysample = np.c_[sample_data["Life satisfaction"]]
lin1.fit(Xsample, ysample)
t0 = 4.8530528
t1 = 4.91154459e-05
sample_data.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(5,3))
plt.axis([0, 60000, 0, 10])
M=np.linspace(0, 60000, 1000)
plt.plot(M, t0 + t1*M, "b")
plt.text(5000, 3.1, r"$\theta_0 = 4.85$", fontsize=14, color="b")
plt.text(5000, 2.2, r"$\theta_1 = 4.91 \times 10^{-5}$", fontsize=14, color="b")
#save_fig('best_fit_model_plot')
plt.show()
print("OK")
OK
Qa: The $\theta$ parameters and the $R^2$ Score¶
We will be using the $\theta$ parameters from the model declared above. To use the $\theta$ parameters we must extract them from the model: the intercept and coefficient parameters, also called $\theta_0$ and $\theta_1$. For a given model, it is possible to extract $\theta_0$ and $\theta_1$ with the following Python attributes.
theta0 = model.intercept_ # Intercept
theta1 = model.coef_[0]
print(f"{theta0}")
print(f"{theta1}")
[4.8530528] [4.91154459e-05]
r_sq = model.score(X,y)
print(f"{r_sq}")
0.7344414355437031
The $R^2$ score shown above is the coefficient of determination. It measures how well the data fit the linear model. It has a maximum value of 1, which indicates that the data match the linear model perfectly. A coefficient of determination below 0 indicates that the model in use is worse than simply predicting the mean. The values for $R^2$ therefore range from minus infinity to 1.
It is best to have a high score, as to the fact that the max value 1 tells us that the data fit perfectly to the linear model.
Qb) Using k-Nearest Neighbors¶
In this exercise we will be using the K-nearest neighbors to train the model.
K-nearest is a supervised learning classifier. It uses the amount k nearest data points in the training dataset it has while the linear regression model doesn't use anything when instantiated. The default value for K-nearest model is 5 if you don't change it. The method for calling fit and predict both for K-nearest model and linear regression model are the same.
The score of the model can be compared in the sense that they provide information on how well the model performs. The score will not have a precise and definite saying about which model is best, but if one of the models are almost zero or less compared to another model is are almost 1, we can say that the model closest to 1 is a better model in this thought experiment. If you want to compare them you will also have to look at other different evaluation types like F1-Score.
# and this is our preprocessed data
country_stats
| GDP per capita | Life satisfaction | |
|---|---|---|
| Country | ||
| Russia | 9054.914 | 6.0 |
| Turkey | 9437.372 | 5.6 |
| Hungary | 12239.894 | 4.9 |
| Poland | 12495.334 | 5.8 |
| Slovak Republic | 15991.736 | 6.1 |
| Estonia | 17288.083 | 5.6 |
| Greece | 18064.288 | 4.8 |
| Portugal | 19121.592 | 5.1 |
| Slovenia | 20732.482 | 5.7 |
| Spain | 25864.721 | 6.5 |
| Korea | 27195.197 | 5.8 |
| Italy | 29866.581 | 6.0 |
| Japan | 32485.545 | 5.9 |
| Israel | 35343.336 | 7.4 |
| New Zealand | 37044.891 | 7.3 |
| France | 37675.006 | 6.5 |
| Belgium | 40106.632 | 6.9 |
| Germany | 40996.511 | 7.0 |
| Finland | 41973.988 | 7.4 |
| Canada | 43331.961 | 7.3 |
| Netherlands | 43603.115 | 7.3 |
| Austria | 43724.031 | 6.9 |
| United Kingdom | 43770.688 | 6.8 |
| Sweden | 49866.266 | 7.2 |
| Iceland | 50854.583 | 7.5 |
| Australia | 50961.865 | 7.3 |
| Ireland | 51350.744 | 7.0 |
| Denmark | 52114.165 | 7.5 |
| United States | 55805.204 | 7.2 |
In the cell below we start with preparing the data and visualizing it. We use the K-nearest neighbor regressor from sklearn which can be found: https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KNeighborsRegressor.html
# Import the KNeighborsRegressor
from sklearn.neighbors import KNeighborsRegressor
#Prepare the data
X = np.c_[country_stats["GDP per capita"]]
y = np.c_[country_stats["Life satisfaction"]]
print("X.shape=",X.shape)
print("y.shape=",y.shape)
# Visualize the data
country_stats.plot(kind='scatter', x="GDP per capita", y='Life satisfaction')
plt.show()
# Instantiate
knn = KNeighborsRegressor(n_neighbors=3)
# Train model
knn.fit(X, y)
# Predict and print the knn = 5.766667
print(f"K-nearstneighbor prediction for Cypress: {knn.predict(X_new)}")# knn = ..
print(f"Linearregression prediction for Cypress: {lin1.predict(X_new)}")
X.shape= (29, 1) y.shape= (29, 1)
K-nearstneighbor prediction for Cypress: [[5.76666667]] Linearregression prediction for Cypress: [[5.96242338]]
As you can see the prediction for the k-nearest neighbor and linear regression are close to each other. However, they are not the same. This is due to the fact that they are fundamentally different algorithms.
Qc) Tuning Parameter for k-Nearest Neighbors and A Sanity Check¶
In this chapter we will look into tuning the parameter for k-nearest neighbors. By changing the parameter we will examine how it works and do a sanity check. We will answer why a score close to 1 does not mean that k-nearest neighbors is the right estimator for the job.
The problem with having a k-nearest neighbor produces a score that is 1. It makes a prediction based on the single closest value and that can cause overfitting. Or as you can see on the picture where N=1, the line does follow the rest of the data and is overfitting.
It is also a problem if the k-nearest neighbor gets too large, because the data does not follow the data as you can observe below where K=20.
sample_data.plot(kind='scatter', x="GDP per capita", y='Life satisfaction', figsize=(5,3))
plt.axis([0, 60000, 0, 10])
# create an test matrix M, with the same dimensionality as X, and in the range [0;60000]
# and a step size of your choice
m=np.linspace(0, 60000, 1000)
M=np.empty([m.shape[0],1])
M[:,0]=m
# from this test M data, predict the y values via the lin.reg. and k-nearest models
y_pred_lin = model.predict(M)
y_pred_knn = knn.predict(M)
# use plt.plot to plot x-y into the sample_data plot..
# Plot linear regression
plt.plot(m, y_pred_lin, "r--", label="linear")
# Plot Neighbor set to 3
plt.plot(m, y_pred_knn, "b--", label="n=3")
# Import the KNeighbor regressor
from sklearn.neighbors import KNeighborsRegressor
# Neighbor set to 1
knn1 = KNeighborsRegressor(n_neighbors=1)
knn1.fit(X, y)
y_pred_knn1 = knn1.predict(M)
plt.plot(m, y_pred_knn1, "c--", label="n=1")
# Neighbor set to 10
knn2 = KNeighborsRegressor(n_neighbors=10)
knn2.fit(X, y)
y_pred_knn2 = knn2.predict(M)
plt.plot(m, y_pred_knn2, "y--", label="n=10")
# Legends
plt.legend()
# Print
print(f"1 nearest neighbour score: {knn1.score(X, y)}")
print(f"3 nearest neighbour score: {knn.score(X, y)}")
print(f"10 nearest neighbour score: {knn2.score(X, y)}")
1 nearest neighbour score: 1.0 3 nearest neighbour score: 0.8525732853499179 10 nearest neighbour score: 0.7833080605150065
Above you can see both the scores and the plots for the 3 different numbers of neighbors. The k-nearest neighbor model with 1 neighbor has a score of 1 and follows the data completely, which is overfitting. n=3 is better because it follows the trend less tightly, giving less chance of overfitting. Looking at n=10, the line is much smoother, but this also means a loss of detail. It is difficult to tell which model is the best one from the diagram alone. The goal is a balance: k small enough to capture local patterns, but large enough to reduce noise.
Qd) Trying out a Neural Network¶
We are going to try to use the neural network. We are using the Multi-Layer Perceptron (MLP) for regression. We find the MLPregressor here: https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html
NB: each run of the cell below produces a different outcome, because we create a new MLPRegressor and train it again.
from sklearn.neural_network import MLPRegressor;
# Setup MLPRegressor
mlp = MLPRegressor( hidden_layer_sizes=(10,), solver='adam', activation='relu', tol=1E-5, max_iter=100000, verbose=True);
mlp.fit(X, y.ravel())
# lets make a MLP regressor prediction and redo the plots
y_pred_mlp = mlp.predict(M)
Iteration 1, loss = 103973.70805067 Iteration 2, loss = 94453.76574481 Iteration 3, loss = 85404.46329600 Iteration 4, loss = 76833.18742100 Iteration 5, loss = 68745.92120590 Iteration 6, loss = 61147.09257150 Iteration 7, loss = 54039.42244438 Iteration 8, loss = 47423.77504388 Iteration 9, loss = 41299.01321714 Iteration 10, loss = 35661.86233206 Iteration 11, loss = 30506.78685617 Iteration 12, loss = 25825.88438528 Iteration 13, loss = 21608.80250166 Iteration 14, loss = 17842.68438490 Iteration 15, loss = 14512.14950113 Iteration 16, loss = 11599.31587194 Iteration 17, loss = 9083.87027414 Iteration 18, loss = 6943.19213986 Iteration 19, loss = 5152.53581267 Iteration 20, loss = 3685.27408974 Iteration 21, loss = 2513.20360678 Iteration 22, loss = 1606.90963009 Iteration 23, loss = 936.18432471 Iteration 24, loss = 470.48878361 Iteration 25, loss = 179.44534259 Iteration 26, loss = 33.34335407 Iteration 27, loss = 3.63906801 Iteration 28, loss = 63.42895392 Iteration 29, loss = 187.87597817 Iteration 30, loss = 354.57014607 Iteration 31, loss = 543.80794654 Iteration 32, loss = 738.77991743 Iteration 33, loss = 925.66093145 Iteration 34, loss = 1093.60344758 Iteration 35, loss = 1234.63933088 Iteration 36, loss = 1343.50044206 Iteration 37, loss = 1417.37170021 Iteration 38, loss = 1455.59255575 Training loss did not improve more than tol=0.000010 for 10 consecutive epochs. Stopping.
plt.plot(m, y_pred_lin, "r", label="linear")
plt.plot(m, y_pred_knn, "b", label="neigh=3")
plt.plot(m, y_pred_mlp, "k", label="MLP")
plt.legend();
print(f"MLP score: {mlp.score(X, y)}")
print(f"K-nearstneighbor=3 score: {knn.score(X, y)}")
print(f"Linearregression score: {lin1.score(X, y)}")
MLP score: -3.6741440524092503 K-nearstneighbor=3 score: 0.8525732853499179 Linearregression score: 0.7344414355437031
The score for the MLP differs each time you train the model; sometimes the score is -25.42, -3.85 or other values. As we can observe both from the score and the plot, the Multi-Layer Perceptron does not fit the data well even for large iteration counts. The score can again be used as an indicator of the fitness of the model. As we can see, this training data set does not work well with the MLP without pre-scaling. In the next chapter we will use pre-scaling.
SWMAL Exercise¶
Python Basics¶
Modules and Packages in Python¶
Qa Load and test the libitmal module¶
This chapter will describe how to load and test modules. First you have to import sys and os to work with the interpreter and the operating system. Here you will load the libitmal package. The import can be done in a few different ways; in the example below os.path.expanduser is used. Below we print the path and the module's contents.
import sys,os
sys.path.append(os.path.expanduser('../'))
from libitmal import utils as itmalutils
print(dir(itmalutils))
print(itmalutils.__file__)
itmalutils.TestAll()
['AssertInRange', 'CheckFloat', 'DToXy', 'GenerateConfusionMatrix', 'GenerateResults', 'InRange', 'Iterable', 'ListToMatrix', 'ListToVector', 'PrintMatrix', 'ResetRandom', 'ShowResult', 'TEST', 'TestAll', 'TestCheckFloat', 'TestPrintMatrix', 'TestVarName', 'VarName', 'XyToD', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'ctxlib', 'inf', 'inspect', 'isFloat', 'isList', 'isNumpyArray', 'nan', 'np', 'random', 're', 'sklearn']
c:\Users\Bjark\source\repos\MAL\mal12\L01\..\libitmal\utils.py
TestPrintMatrix...(no regression testing)
X=[[ 1. 2.]
[ 3. -100.]
[ 1. -1.]]
X=[[ 1. 2.]
...
[ 1. -1.]]
X=[[ 1.
2. ]
[ 3.0001
-100. ]
[ 1.
-1. ]]
X=[[ 1. 2.]
[ 3. -100.]
[ 1. -1.]]
OK
TEST: OK
ALL OK
Qb Create your own module, with some functions, and test it¶
Here we create our own module. The module is places inside the mal12, which is our github folder. This will make it easy to collaborate and will be pushed every time changed are made. Creating our module is done by creating two files. The "init.py" and "utils.py"
import sys,os
import numpy as np
sys.path.append(os.path.expanduser('~/MAL/mal12'))
from UtilsMAL import utils as myUtils
print(dir(myUtils))
print(myUtils.__file__)
L1= myUtils.L1
dummy = np.array([3,4,5])
print(f"The answer for the L1: {L1(dummy)}")
['L1', 'L2', 'L2Dot', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'checkArraySize', 'checkArraySizeCoantainsElements', 'if_windoes', 'np', 'os'] c:\Users\Bjark\source\repos\MAL\mal12\L01\..\UtilsMAL\utils.py The answer for the L1: 12.0
Qc How do you 'recompile' a module?¶
If you are using Visual Studio Code, reloading works out-of-the-box. However, this is not the case for Jupyter Notebook. In Jupyter you have to either use importlib.reload or the %autoreload magic. The information is found at: Jupyter Notebook Reload Module: A Comprehensive Guide. Below you can see the different ways to reload. For %autoreload, setting the parameter to 2 reloads all modules before every executed cell.
import importlib
importlib.reload(myUtils)
# Auto reload
%load_ext autoreload
%autoreload 2
[OPTIONAL] Qd Write a Howto on Python Modules a Packages¶
Write a short description of how to use modules in Python (notes on modules path, import directives, directory structure, etc.)
In this chapter we will look into creating our own library. Firstly create a new folder and name is UtilsMal. Inside this Now will create two files. Them first file called "init.py" and the second utils
Classes in Python¶
Qe Extend the class with some public and private functions and member variables¶
When creating a class in Python you will often see the keyword self. It represents the instance of the class. The name self can be replaced by another name, but the convention is to use self. If you omit self, the method cannot access the instance's members. In the code snippets below you can observe a def myfun(): where self is omitted. If you run such code you will get the error: TypeError: MyClass.myfun() takes 0 positional arguments but 1 was given. The error occurs because, when an instance myobjectx of MyClass calls myobjectx.myfun(), Python implicitly passes the instance as the first argument, and the method does not accept it.
class MyClass:
    """Demo class: a method with a method-local variable."""

    def myfun(self):
        # NOTE: local to each call of myfun, not a persistent attribute.
        myvar = "blah"
        message = f"This is a message inside the class, myvar={myvar}."
        print(message)
myobjectx = MyClass()
print(myobjectx.myfun())
This is a message inside the class, myvar=blah. None
Members are public when there is no underscore before the name. With one leading underscore the member is (by convention) protected; with two leading underscores it is private (name-mangled). Functions follow the same convention: a leading underscore marks them as internal.
Qf Extend the class with a Constructor¶
The constructor in Python is defined using the __init__ method. The constructor takes the parameter self, and more parameters can be added if needed. If you don't want a class that needs a constructor, you can use only class-level (static-like) methods. The destructor in Python is the __del__ method. The destructor is used to define behavior that occurs when an object is about to be destroyed or garbage collected.
class NewClass:
    """Demo of a method without `self` -- callable only on the class itself."""

    def fun():
        # No `self` parameter: calling this through an instance would fail.
        return "Static instance"
print(NewClass.fun());
Static instance
class MyClass:
    """Demo class with a constructor storing a private attribute."""

    def __init__(self, text):
        # Name-mangled to _MyClass__myvar: a private instance attribute.
        self.__myvar = text

    def myfun(self, text):
        # Prints the argument, not the stored attribute.
        print(f"This is a message inside the class, myvar={text}.")
myobjectx = MyClass("text")
print(myobjectx.myfun("nontext"))
This is a message inside the class, myvar=nontext. None
Qg Extend the class with a to-string function¶
This chapter will show how to make a to-string function in Python. The function is named __str__, because that is the standard way to do it in Python. We call the function inside the print function.
class MyClass:
    """Demo class whose __str__ acts as a to-string method."""

    def __init__(self, text):
        self.__myvar = text  # name-mangled private attribute

    def __str__(self):
        value = self.__myvar
        return f"MyClass instance with value: {value}"
myobjectx = MyClass("text")
print(myobjectx.__str__())
MyClass instance with value: text
SWMAL Exercise¶
Mathematical Foundation¶
Vector and matrix representation in python¶
$$ \newcommand\rem[1]{} \rem{SWMAL: CEF def and LaTeX commands, remember: no newlines in defs} \newcommand\eq[2]{#1 &=& #2\\} \newcommand\ar[2]{\begin{array}{#1}#2\end{array}} \newcommand\ac[2]{\left[\ar{#1}{#2}\right]} \newcommand\st[1]{_{\scriptsize #1}} \newcommand\norm[1]{{\cal L}_{#1}} \newcommand\obs[2]{#1_{\text{\scriptsize obs}}^{\left(#2\right)}} \newcommand\diff[1]{\text{d}#1} \newcommand\pown[1]{^{(#1)}} \def\pownn{\pown{n}} \def\powni{\pown{i}} \def\powtest{\pown{\text{\scriptsize test}}} \def\powtrain{\pown{\text{\scriptsize train}}} \def\bX{\mathbf{M}} \def\bX{\mathbf{X}} \def\bZ{\mathbf{Z}} \def\bw{\mathbf{m}} \def\bx{\mathbf{x}} \def\by{\mathbf{y}} \def\bz{\mathbf{z}} \def\bw{\mathbf{w}} \def\btheta{{\boldsymbol\theta}} \def\bSigma{{\boldsymbol\Sigma}} \def\half{\frac{1}{2}} $$
This exercise is about vector and matrix representation in Python. Firstly, we show how to implement an array in Python. Secondly, the $\norm{1}$ and $\norm{2}$ norms will be implemented without using sqrt or other mathematical operators from the math library.
Qa Given the following $\mathbf{x}^{(i)}$'s, construct and print the $\mathbf X$ matrix in python.¶
$$ \ar{rl}{ \bx\pown{1} &= \ac{c}{ 1, 2, 3}^T \\ \bx\pown{2} &= \ac{c}{ 4, 2, 1}^T \\ \bx\pown{3} &= \ac{c}{ 3, 8, 5}^T \\ \bx\pown{4} &= \ac{c}{-9,-1, 0}^T } $$
For using the Python in the course we will have to get familiar with Python and the libraries.
In this chapter we will show how you can implement an array using numpy. You can either create the whole array at once or you can use the append function. The append take two arrays and put them together in a new array. Another function is the vstack, where the function vertically stack each element.
import numpy as np
y = np.array([1, 2, 3, 4]) # NOTE: you'll need this later
# Default method
X = np.array([[1, 2, 3],[4, 2, 1], [3, 8, 5], [-9, -1, 0]])
# print the full matrix
print(f"X = \n{X}")
# use the vstack
np.vstack(([1, 2, 3],[4, 2, 1], [3, 8, 5], [-9, -1, 0]))
# Use append
q= np.array([[1, 2, 3],[4, 2, 1]] ) # NOTE: you'll need this later
q = np.append(q, [[3, 8, 5], [-9, -1, 0]],axis = 0)
# print the full matrix
print(f"q = \n{q}")
X = [[ 1 2 3] [ 4 2 1] [ 3 8 5] [-9 -1 0]] q = [[ 1 2 3] [ 4 2 1] [ 3 8 5] [-9 -1 0]]
Norms, metrics or distances¶
Qb Implement the $\norm{1}$ and $\norm{2}$ norms for vectors in python.¶
Here we will illustrate how to use norms, metrics or distances. The first thing you will notice below is a function for error validation, which belongs to an exercise described later on. The validation is tested up here; we will explain it in more detail later. Just below the validation function you can observe L1, L2 and L2Dot. The L1 norm gives the distance between two vectors: it takes the absolute value of each element and sums them.
$$ \ar{ll}{ \text{d}(\bx,\by) &= ||\bx-\by||_2\\ &= \left( \sum_{i=1}^n \left| x_{i}-y_{i} \right|^2 \right)^{1/2} } $$
The L2 is the Eucalidian distance or norm for a vector. You can define it through a sum operation or a dot operation. Dot operation is vector multiplied with itself
$$ \norm{2}:~~ ||\bx||_2 = \left( \sum_{i=1}^{n} |x_i|^2 \right)^{1/2}\\ $$
import math
def checkArraySize(X):
    """Raise if X is not a 1-dimensional (vector) numpy array.

    Shared input guard for the norm/metric functions below.
    """
    assert X.shape[0] >= 0
    if not X.ndim == 1:
        raise Exception(f"expected a 1-dimensional array, got ndim={X.ndim}")

def L1(X):
    """L1 (Manhattan) norm of vector X: the sum of absolute elements."""
    checkArraySize(X)
    # abs() instead of the original (Xi**2)**0.5 round-trip
    return sum(abs(Xi) for Xi in X)

def L2(X):
    """L2 (Euclidean) norm of vector X, via an explicit sum of squares."""
    checkArraySize(X)
    return sum(Xi * Xi for Xi in X) ** (1 / 2)

def L2Dot(X):
    """L2 norm of vector X via the dot product X.X (equivalent to L2)."""
    checkArraySize(X)  # validate input like the sibling norms, for consistency
    return np.sqrt(np.dot(X, X))
# Test the function with the following vectors
tx=np.array([1, 2, 3, -1])
ty=np.array([3,-1, 4, 1])
expected_d1=8.0
expected_d2=4.242640687119285
d1=L1(tx-ty)
d2=L2(tx-ty)
print(f"tx-ty={tx-ty}, d1-expected_d1={d1-expected_d1}, d2-expected_d2={d2-expected_d2}")
eps=1E-9
# NOTE: remember to import 'math' for fabs for the next two lines..
assert math.fabs(d1-expected_d1)<eps, "L1 dist seems to be wrong"
assert math.fabs(d2-expected_d2)<eps, "L2 dist seems to be wrong"
print("OK(part-1)")
# comment-in once your L2Dot fun is ready...
d2dot=L2Dot(tx-ty)
print("d2dot-expected_d2=",d2dot-expected_d2)
assert math.fabs(d2dot-expected_d2)<eps, "L2Ddot dist seem to be wrong"
print("OK(part-2)")
tx-ty=[-2 3 -1 -2], d1-expected_d1=0.0, d2-expected_d2=0.0 OK(part-1) d2dot-expected_d2= 0.0 OK(part-2)
The cost function, $J$¶
Qc Construct the Root Mean Square Error (RMSE) function (Equation 2-1 [HOML]).¶
We will here show how to call the function Root Mean Square Error (RMSE), by using the $\bX$ matrix and $\by$ from Qa. There will be implemented a dummy hypothesis function. With this function we will be taking the first column of $\bX$ as its 'prediction'
$$ h\st{dummy}(\bX) = \bX(:,0) $$
We will be calling the '''L2''' function that has been implemented internally in RMSE
def RMSE(X, y):
    """Root Mean Square Error between prediction vector X and target y.

    Computed as ||X - y||_2 / sqrt(n). The original multiplied by a
    hard-coded 0.5, which equals 1/sqrt(n) only for length-4 vectors;
    dividing by sqrt(n) generalizes to any vector length while giving
    the same result for the notebook's test data.
    """
    dif_array = X - y
    checkArraySize(dif_array)
    return L2(dif_array) / (dif_array.size ** 0.5)
# Dummy h function:
def h(X):
    """Dummy hypothesis function: h(X) = X[:, 0] (the first column).

    Raises:
        ValueError: if X is not 2-D or has a zero-length axis.
    """
    if X.ndim != 2:
        # original raised with a 2-tuple message and a typo ('excpeted')
        raise ValueError(f"expected X to be of ndim=2, got ndim={X.ndim}")
    if X.shape[0] == 0 or X.shape[1] == 0:
        raise ValueError("X got zero data along the 0/1 axis, cannot continue")
    return X[:, 0]
# Calls your RMSE() function:
r=RMSE(h(X),y)
# TEST vector:
eps=1E-9
expected=6.57647321898295
print(f"RMSE={r}, diff={r-expected}")
assert math.fabs(r-expected)<eps, "your RMSE dist seems to be wrong"
print("OK")
RMSE=6.576473218982953, diff=2.6645352591003757e-15 OK
MAE¶
Qd Similar construct the Mean Absolute Error (MAE) function (Equation 2-2 [HOML]) and evaluate it.¶
As we just created a function for the RMSE, we also want to create a function for the Mean Absolute Error (MAE). The MAE will internally use the $\norm{1}$
$$ \ar{rl}{ \text{MAE} &= \frac{1}{n} \sum_{i=1}^{n} \left| \hat{y}_i - y_i \right| } $$
def MAE(X, y):
    """Mean Absolute Error between prediction vector X and target y.

    MAE = L1(X - y) / n. The original computed L1(X - y) twice and
    discarded the first result; the redundant call is removed.
    """
    checkArraySize(X)
    return L1(X - y) / X.size
# Calls your MAE function:
r=MAE(h(X), y)
# TEST vector:
expected=3.75
print(f"MAE={r}, diff={r-expected}")
assert math.fabs(r-expected)<eps, "MAE dist seems to be wrong"
print("OK")
MAE=3.75, diff=0.0 OK
# Consolidated (re)definitions of the metric helpers, with the fixes
# applied consistently: clearer error message, abs() in L1, no duplicate
# L1 call in MAE, and RMSE generalized from the hard-coded *0.5 (only
# correct for n == 4) to a proper division by sqrt(n).
def checkArraySize(X):
    """Raise if X is not a 1-dimensional (vector) numpy array."""
    assert X.shape[0] >= 0
    if not X.ndim == 1:
        raise Exception(f"expected a 1-dimensional array, got ndim={X.ndim}")

def L1(X):
    """L1 (Manhattan) norm: the sum of absolute elements."""
    checkArraySize(X)
    return sum(abs(Xi) for Xi in X)

def L2(X):
    """L2 (Euclidean) norm: square root of the sum of squares."""
    checkArraySize(X)
    return sum(Xi * Xi for Xi in X) ** (1 / 2)

def RMSE(X, y):
    """Root Mean Square Error: ||X - y||_2 / sqrt(n)."""
    dif_array = X - y
    checkArraySize(dif_array)
    return L2(dif_array) / (dif_array.size ** 0.5)

def MAE(X, y):
    """Mean Absolute Error: L1(X - y) / n."""
    checkArraySize(X)
    return L1(X - y) / X.size
Qf Conclusion¶
In this exercise we have implemented the L1 and L2 distance functions. We have also manually implemented the RMSE and MAE functions without the use of built-in functions. We have tested the functions with the given vectors and the results are as expected. This helps us learn and become familiar with the formulas and functions, and get a better understanding of the math behind the methods often used in Machine Learning. Lastly, we have also learned how to verify the correctness of the functions by adding error checking to our Python code.
SWMAL Exercise¶
Implementing a dummy binary-classifier with fit-predict interface¶
In this exercise we will working with the MNIST data-set and reuse the data loader from Scikit-learn. Next we create a dummy classifier, and compare the results of the SGD and dummy classifiers using the MNIST data.
Qa Load and display the MNIST data¶
We will fetch the MNIST data with a sklearn.datasets.fetch_openml dataloader interface in Scikit-learn.
Two functions are created. One to fetch the data-set and one to plot a single digit.
from sklearn.datasets import fetch_openml
import matplotlib
import matplotlib.pyplot as plt
# Function to fetch the MNIST dataset
def MNIST_GetDataSet():
    """Download MNIST ('mnist_784') via OpenML and return (X, y).

    as_frame=False makes fetch_openml return plain numpy arrays rather
    than a DataFrame. Requires network access on first call.
    """
    X,y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame = False)
    return X,y
# Function to plot a digit
%matplotlib inline
def MNIST_PlotDigit(data):
    """Render one flattened MNIST digit as a 28x28 black/white image.

    Args:
        data: flat pixel vector; assumes exactly 784 elements so it can
            be reshaped to 28x28 -- TODO confirm at call sites.
    """
    image = data.reshape(28, 28)
    plt.imshow(image, cmap = matplotlib.cm.binary, interpolation="nearest")
    plt.axis("off")
# Fetches X and y
X,y = MNIST_GetDataSet()
#Convert to [0;1] via scaling
X = X / 255.
#Check if data is loaded
print("Number of samples:", len(X))
digit_to_plot = X[9] # The index is 9 for the 10th digit (0-based index)
#Check what number the image is representing
print("Label of the digit:", y[9])
MNIST_PlotDigit(digit_to_plot)
c:\Users\oscar\AppData\Local\Programs\Python\Python311\Lib\site-packages\sklearn\datasets\_openml.py:1002: FutureWarning: The default value of `parser` will change from `'liac-arff'` to `'auto'` in 1.4. You can set `parser='auto'` to silence this warning. Therefore, an `ImportError` will be raised from 1.4 if the dataset is dense and pandas is not installed. Note that the pandas parser may return different data types. See the Notes Section in fetch_openml's API doc for details. warn(
Number of samples: 70000 Label of the digit: 4
As we can see, a random digit is chosen (9). The image is shown and the y_true is 4.
Qb Add a Stochastic Gradient Decent [SGD] Classifier¶
In this section we create a train-test data-set for MNIST and use the SGDClassifier as done in [HOML].
We split the data and run the fit-predict for the classifier using the MNIST data. We will try to predict and identify one digit. In this example we will try to detect if the image shows the number 5. This '5-detector' will be an example of a binary classifier, capable of distinguishing between just two classes, 5 and non-5. We will also see if any are misclassified.
# TODO: add your code here..
import numpy as np
from sklearn.linear_model import SGDClassifier
#reshapes X to 2D array if not already
if X.ndim==3:
print("reshaping X..")
assert y.ndim==1
X = X.reshape((X.shape[0],X.shape[1]*X.shape[2]))
assert X.ndim==2
print(f"X.shape={X.shape}") # X.shape= (70000, 784)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# Create binary target variable y_train_5 with the digit '5' as true
y_train_5 = (y_train == '5') # True for all 5s, False for all other digits
# Create binary target variable for the testing set
y_test_5 = (y_test == '5')
# Create a SGDClassifier which will classify if a digit is a 5 or not
sgd_clf = SGDClassifier(random_state=42)
# Train the classifier using the training data
sgd_clf.fit(X_train, y_train_5)
y_pred = sgd_clf.predict(X_test)
classified_numbers = np.where(y_pred == y_test_5)[0]
for number in classified_numbers[:3]:
MNIST_PlotDigit(X_test[number])
# Predict if the digit is a 5 or not
prediction = sgd_clf.predict([X_test[number]]) # Provide the data sample
print(f"Predicted Label: {prediction[0]}")
plt.show()
X.shape=(70000, 784) Predicted Label: False
Predicted Label: False
Predicted Label: False
Given a loop that iterates through the first 3 instances where y_pred = y_test_5 (correctly classified numbers), we see that the predicted labels are consistent with images
# Find misclassified instances
misclassified_numbers = np.where(y_pred != y_test_5)[0]
classified_numbers = np.where(y_pred == y_test_5)[0]
for number in misclassified_numbers[:3]:
MNIST_PlotDigit(X_test[number])
prediction = sgd_clf.predict([X_test[number]]) # Provide the data sample
print(f"Predicted Label: {prediction[0]}")
plt.show()
Predicted Label: False
Predicted Label: False
Predicted Label: False
In the same way, we identified some digits that were missclassified. It seems that it is not always precise.
Qc Implement a dummy binary classifier¶
In this section we try to create a dummy classifier that just classifies every single image as the majority class, which in this case is the non-5 class. We call fit-predict on our dummy class and extract the accuracy score from the test data.
from sklearn.metrics import accuracy_score
import numpy as np
class DummyClassifier():
    """Baseline classifier that always predicts the most frequent class seen in fit().

    Used to show that plain accuracy is a poor metric on imbalanced data.
    """

    def fit(self, X, y):
        """Memorize the classes present in y and the majority class.

        Uses np.unique(return_counts=True) instead of np.bincount, so it works
        for any label type (bool, int, str) and stores the actual class label
        rather than an integer index.  X is ignored — no training happens.
        """
        self.classes_, counts = np.unique(y, return_counts=True)
        self.dummy_class_ = self.classes_[np.argmax(counts)]  # most frequent label
        return self

    def predict(self, X):
        """Return an array of length len(X) filled with the majority class label."""
        return np.full(X.shape[0], fill_value=self.dummy_class_)
dummy_clf = DummyClassifier()
# "Training" only memorizes that the majority class is non-5
dummy_clf.fit(X_train, y_train_5) #trains the dummy classifier. Does not do anything
y_pred = dummy_clf.predict(X_test)
print(any(y_pred)) #predicts if any of the digits is a 5, gives a False
# Accuracy is high only because ~91% of the test digits are non-5s
accuracy_score(y_test_5, y_pred)
#91% accuracy, just like in HOML
False
0.9108
With the dummy class every digit is a non-5. When we extract the accuracy score we get the same score as in HOML.
Qd Conclusion¶
As seen in the exercise, we can use the SGDClassifier to train a model on the MNIST data-set. Through the exercise we can conclude that the model fits the train and test data and makes some correct and some incorrect predictions. When testing the performance measures for the classifier, the evaluation is a bit tricky. We get a 91% accuracy score for our DummyClassifier, which only outputs False for every digit. Because the majority of the digits are non-5s (about 90% of them), that fraction becomes our score. In this case, the accuracy score does not represent how good our model is at all. We can therefore conclude that it is not always preferred to use accuracy to measure performance when dealing with classifiers — especially not on data-sets where some classes are much more frequent than others.
SWMAL Exercise Performance metrics¶
In this exercise we will take a look at various ways to evaluate an algorithm using performance metrics. We will do this by recreating some of the well-known metric formulas as Python functions and comparing them to the versions found in the sklearn.metrics library. These functions will then be tested on the algorithms from the dummy classifier exercise.
The following codebit is copied from the dummy classifier exercise, and will therefore not be described further.
from sklearn.datasets import fetch_openml
import matplotlib
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score
import numpy as np
# Function to fetch the MNIST dataset
def MNIST_GetDataSet():
    """Download MNIST (70000 flattened 28x28 digit images, string labels) from OpenML."""
    data, target = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame = False)
    return data, target
# Fetch the raw MNIST data
X, y = MNIST_GetDataSet()
# Convert pixel values to [0;1] via scaling BEFORE splitting and fitting.
# BUG FIX: previously X was rescaled only AFTER the classifier was trained,
# so the scaling had no effect on the model or the predictions.
X = X / 255.
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
# Create binary target variable y_train_5 with the digit '5' as true
y_train_5 = (y_train == '5') # True for all 5s, False for all other digits
# Create binary target variable for the testing set
y_test_5 = (y_test == '5')
# Create a SGDClassifier which will classify if a digit is a 5 or not
sgd_clf = SGDClassifier(random_state=42)
# Train the classifier using the training data
sgd_clf.fit(X_train, y_train_5)
y_pred_sdg = sgd_clf.predict(X_test)
#Check if data is loaded
class DummyClassifier():
    """Baseline classifier that always predicts the most frequent class seen in fit().

    Used to show that plain accuracy is a poor metric on imbalanced data.
    """

    def fit(self, X, y):
        """Memorize the classes present in y and the majority class.

        Uses np.unique(return_counts=True) instead of np.bincount, so it works
        for any label type (bool, int, str) and stores the actual class label
        rather than an integer index.  X is ignored — no training happens.
        """
        self.classes_, counts = np.unique(y, return_counts=True)
        self.dummy_class_ = self.classes_[np.argmax(counts)]  # most frequent label
        return self

    def predict(self, X):
        """Return an array of length len(X) filled with the majority class label."""
        return np.full(X.shape[0], fill_value=self.dummy_class_)
# Fit the dummy baseline and predict on the test set for the metric comparisons below
dummy_clf = DummyClassifier()
dummy_clf.fit(X_train, y_train_5)
y_pred_dummy = dummy_clf.predict(X_test)
c:\Users\oscar\AppData\Local\Programs\Python\Python311\Lib\site-packages\sklearn\datasets\_openml.py:1002: FutureWarning: The default value of `parser` will change from `'liac-arff'` to `'auto'` in 1.4. You can set `parser='auto'` to silence this warning. Therefore, an `ImportError` will be raised from 1.4 if the dataset is dense and pandas is not installed. Note that the pandas parser may return different data types. See the Notes Section in fetch_openml's API doc for details. warn(
Qa Implement the Accuracy function and test it on the MNIST data.¶
An accuracy function called MyAccuracy will be created. It returns the fraction of elements that were predicted correctly. Furthermore, a test function will be created that uses both the self-made function and the sklearn version for validation. The test function will then be used on a simple example, on the SGD classifier and on the dummy classifier.
# TODO: Qa...
from sklearn.metrics import accuracy_score
def MyAccuracy(y_true, y_pred):
    """Fraction of positions where y_pred agrees with y_true; 0 for empty input."""
    if len(y_true) == 0 or len(y_pred) == 0:
        return 0
    # count agreeing positions, indexed over y_true
    hits = sum(1 for i, truth in enumerate(y_true) if truth == y_pred[i])
    return hits / len(y_true)
# TEST FUNCTION: example of a comperator, using Scikit-learn accuracy_score
def TestAccuracy(y_true, y_pred):
    """Print MyAccuracy next to scikit-learn's accuracy_score for comparison."""
    if len(y_true) != len(y_pred):
        print("Error: Input arrays have different lengths.")
        return
    mine = MyAccuracy(y_true, y_pred)
    reference = accuracy_score(y_true, y_pred)
    print(f"\nmy a ={mine}")
    print(f"scikit-learn a={reference}")
# Compare MyAccuracy with scikit-learn on a toy example and on both classifiers
print("\nTestAccuracy() simple example:")
TestAccuracy([1,2,3,4], [1,2,3,3])
print("\nTestAccuracy() on sdg:")
TestAccuracy(y_test_5, y_pred_sdg)
print("\nTestAccuracy() on dummy:")
TestAccuracy(y_test_5, y_pred_dummy)
# # do some numerical comparison here, like
# # if fabs(a0-a1)<eps then ..
TestAccuracy() simple example: my a =0.75 scikit-learn a=0.75 TestAccuracy() on sdg: my a =0.9492 scikit-learn a=0.9492 TestAccuracy() on dummy: my a =0.9108 scikit-learn a=0.9108
Qb Implement Precision, Recall and $F_1$-score and test it on the MNIST data for both the SGD and Dummy classifier models¶
In the following section a function and test function is created in the same way as with the previous function from Qa. This is done for the precision-, recall- and F1-score respectively.
# TODO: Qb..
import sklearn.metrics as metrics
def MyPrecision(y_true, y_pred):
    """TP / (TP + FP): fraction of predicted positives that are truly positive.

    Returns 0 when nothing was predicted positive (avoids division by zero).
    """
    # predicted condition positive: everything the model flagged as True
    predicted_positives = sum(1 for item in y_pred if item == True)
    if predicted_positives == 0:
        return 0
    true_positives = sum(1 for i, truth in enumerate(y_true)
                         if truth == True and y_pred[i] == True)
    return true_positives / predicted_positives
def TestPrecision(y_true, y_pred):
    """Print MyPrecision next to scikit-learn's precision_score for comparison."""
    if len(y_true) != len(y_pred):
        print("Error: Input arrays have different lengths.")
        return
    mine = MyPrecision(y_true, y_pred)
    reference = metrics.precision_score(y_true, y_pred)
    print(f"\nmy p ={mine}")
    print(f"scikit-learn p={reference}")
# Compare MyPrecision with scikit-learn on a toy example and on both classifiers
print("\nTestPrecision() simple example:")
TestPrecision([True,True,False,True], [True,True,True,False])
print("\nTestPrecision() on sdg:")
TestPrecision(y_test_5, y_pred_sdg)
print("\nTestPrecision() on dummy:")
TestPrecision(y_test_5, y_pred_dummy)
TestPrecision() simple example: my p =0.6666666666666666 scikit-learn p=0.6666666666666666 TestPrecision() on sdg: my p =0.6618887015177066 scikit-learn p=0.6618887015177066 TestPrecision() on dummy: my p =0 scikit-learn p=0.0
c:\Users\oscar\AppData\Local\Programs\Python\Python311\Lib\site-packages\sklearn\metrics\_classification.py:1469: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 due to no predicted samples. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
The precision metric can be used to identify the ratio between true positives and all positive predictions. We see in the simple example that the methods filter out the predicted falses and only evaluate the 3 predicted trues. This results in a score of 66%, since one is a false positive. By using it on the data from the dummy exercise, we furthermore see that the dummy gets 0% because it only guesses false, while the SGD gets about 2/3. This means that about 1/3 of the numbers the algorithm thinks are fives actually aren't. In the real world this would be terrible if it were used, e.g., to convict criminals, where you are innocent until proven guilty.
def MyRecall(y_true, y_pred):
    """TP / (TP + FN): fraction of actual positives the model found.

    Returns 0 when there are no actual positives (avoids division by zero).
    """
    # condition positive: every sample whose true label is True
    actual_positives = sum(1 for item in y_true if item == True)
    if actual_positives == 0:
        return 0
    true_positives = sum(1 for i, truth in enumerate(y_true)
                         if truth == True and y_pred[i] == True)
    return true_positives / actual_positives
def TestRecall(y_true, y_pred):
    """Print MyRecall next to scikit-learn's recall_score for comparison."""
    if len(y_true) != len(y_pred):
        print("Error: Input arrays have different lengths.")
        return
    mine = MyRecall(y_true, y_pred)
    reference = metrics.recall_score(y_true, y_pred)
    print(f"\nmy r ={mine}")
    print(f"scikit-learn r={reference}")
# Compare MyRecall with scikit-learn on a toy example and on both classifiers
print("\nTestRecall() simple example:")
TestRecall([True,True,True,True], [True,True,True,False])
print("\nTestRecall() on sdg:")
TestRecall(y_test_5, y_pred_sdg)
print("\nTestRecall() on dummy:")
TestRecall(y_test_5, y_pred_dummy)
TestRecall() simple example: my r =0.75 scikit-learn r=0.75 TestRecall() on sdg: my r =0.8800448430493274 scikit-learn r=0.8800448430493274 TestRecall() on dummy: my r =0.0 scikit-learn r=0.0
The recall metric tells us about the ratio between true positives and all actual positives. Through this metric we can observe how many positives fall through and therefore aren't noticed by the algorithm. The results show that the SGD is quite good at finding the 5s — very few 5s slip through the algorithm, even though it also identifies some numbers as 5s that aren't. In the real world this would be important in, e.g., disease detection, where it is important to find and treat all cases, and it is probably fine if a few false positives are sent through for further investigation.
def MyF1Score(y_true, y_pred):
    """Harmonic mean of MyPrecision and MyRecall; 0 when either score is 0."""
    precision = MyPrecision(y_true, y_pred)
    recall = MyRecall(y_true, y_pred)
    if precision == 0 or recall == 0:
        return 0
    # harmonic mean, written exactly as the textbook formula: 2 / (1/p + 1/r)
    return 2/((1/precision)+(1/recall))
def TestF1Score(y_true, y_pred):
    """Print MyF1Score next to scikit-learn's f1_score for comparison."""
    if len(y_true) != len(y_pred):
        print("Error: Input arrays have different lengths.")
        return
    mine = MyF1Score(y_true, y_pred)
    reference = metrics.f1_score(y_true, y_pred)
    print(f"\nmy f ={mine}")
    print(f"scikit-learn f={reference}")
# Compare MyF1Score with scikit-learn on a toy example and on both classifiers
print("\nTestF1Score() simple example:")
TestF1Score([True,True,False,True], [True,False,True,False])
print("\nTestF1Score() on sdg:")
TestF1Score(y_test_5, y_pred_sdg)
print("\nTestF1Score() on dummy:")
TestF1Score(y_test_5, y_pred_dummy)
TestF1Score() simple example: my f =0.4 scikit-learn f=0.4 TestF1Score() on sdg: my f =0.7555341674687199 scikit-learn f=0.75553416746872 TestF1Score() on dummy: my f =0 scikit-learn f=0.0
The F1 score takes both the recall, and the precision into account which means the sdg-score lands somewhere between the two scores. Through this metric we identify how often false positives and false negatives occur without looking at the true negatives.
Qc The Confusion Matrix¶
In this section we will construct 3 confusion matrices. One for the both the sdg and the dummy, aswell as one where the input params are switched for the sdg confusion matrices.
# TODO: Qc
from sklearn.metrics import confusion_matrix
# Convention: rows = true condition, columns = predicted class (0 = non-5, 1 = 5)
cm_sdg = confusion_matrix(y_test_5, y_pred_sdg)
# Swapping the arguments transposes the off-diagonal cells (FP <-> FN)
cm_sdg_reversed = confusion_matrix(y_pred_sdg, y_test_5)
cm_dummy = confusion_matrix(y_test_5, y_pred_dummy)
print("sdg:")
print(cm_sdg)
print("\ndummy:")
print(cm_dummy)
print("\nsdg reversed:")
print(cm_sdg_reversed)
sdg: [[8707 401] [ 107 785]] dummy: [[9108 0] [ 892 0]] sdg reversed: [[8707 107] [ 401 785]]
In the above results we can see that the matrices follow the binary convention where 0 = false and 1 = true. From the dummy matrix we can gather that the columns are the predicted values, since there are no entries in column 1 (0-indexed), and therefore the rows must be the true condition. This means the top-left cell holds the true negatives, which matches what we know about the dummy's results so far. Furthermore, we see that if we swap the input parameters, the false positives and false negatives swap places, but the diagonal (and hence the accuracy) stays the same.
Qd A Confusion Matrix Heat-map¶
Now we will generate a heat map for this confusion matrices.
from sklearn.metrics import ConfusionMatrixDisplay
plt.rc('font', size=9) # extra code -- make the text smaller
# Heat-map of the dummy confusion matrix (only the non-5 column is populated)
ConfusionMatrixDisplay.from_predictions(y_test_5,y_pred_dummy)
plt.show()
# Heat-map of the SGD confusion matrix
ConfusionMatrixDisplay.from_predictions(y_test_5,y_pred_sdg)
plt.show()
Qe Conclusion¶
In this exercise, we evaluated classification algorithms using custom Python metrics and scikit-learn's functions. We assessed precision, accuracy, recall, and the F1-score for an SGD classifier and a Dummy classifier. The SGD classifier outperformed the Dummy classifier in all metrics; however, the Dummy classifier was significantly closer in accuracy. This exercise was therefore meant to show us some important metrics to use when evaluating an algorithm. Which metrics matter is of course use-case specific. For example, it could be a good idea to have a high precision when judging criminals, or a high recall/sensitivity when scanning for diseases. Furthermore, the F1-score is useful if you don't care as much about the true negatives, as it takes both precision and recall into account. That is the case for the "is it a 5?" algorithm, and therefore the F1-score provides a more accurate assessment of how useful the SGD algorithm and the Dummy classifier are, with their ~75% and 0% respectively.
Furthermore Confusion matrices and heatmaps provided detailed insights into their performance, and granted us a good visual component for the metrics. These will be important further in the course when we analyze more complex systems with more parameters.
Resume: Look at the Big Picture¶
You have to frame the problem and figure out what you are trying to solve. What is the core issue? Is it a regression or classification task? Furthermore, you have to select performance measures — for example, accuracy isn't very good when evaluating a digit classifier. Finally, you have to check your assumptions: if you assume it is a regression task but find out it is converted to a classification task downstream, a lot of work is wasted.
Resume: Get the Data¶
This segment talks about how to use jupyter notebooks which is presumed to be the thing we should skip summarising. When you aquire your data you should take a look at a few elements to ensure you know what types of data you are dealing with. The method describe() or hist can also give som standard statistical analysis to help you get a feel for the data. At this stage you should also create you test set, by shuffling the data and setting about 20% aside for testing. It needs to be shuffled so the algorithm doesn't just figure out every number is larger than the last fx. You should seed your shuffling so you get the same one each time. Otherwise if you train an algorithm on the same set several times it will end up knowing the whole set. You also need to ensure you data is representative of the true data population, otherwise you should consider stratifying your dataset.
Resume: Explore and Visualize the Data to Gain Insights,¶
This segment is about data exploration and visualization to grain insight in the data. We have to get a more in depth about the data, which only do for the traing sets. It is a good idea to visualize the data through heatmaps/histograms. You may need to set the opacity down or color different segment for finding the patterns. You will also have to look at the correlations between the different value, but only if the dataset is not too large. The correlation coefficient ranges -1 to 1 and if the correlation is close to -1 or 1 it tells there is correlation between the data. Zero mean there is no linear correlation. The last part of this segment is about experiment with attribute combinations. Some atrributes maybe don't make sence they stand alone, so maybe combine them.
Resume: Prepare the Data for Machine Learning Algorithms¶
You need to find a strategy for handling missing datapoints such as entries missing some attributes. You can either remove them, use the median, or if you want to get fancy use the k-nearest neighbour. Furthermore machine learning models prefer numbers, and tend to skew towards numbers with larger intervals. Therfore it could be a good idea to standerdize your data to be between -1 - 1. This can be done by scaling all the
Resume: Select and Train a Model¶
You are now ready to select and train a model. Firstly try a simple model as linear regression, however some values may be way off. These data can be hard to understand, so we are going to use some tools for validation. This is can be MAE or RMSE. Another problem with the data if examples used the k-nearstnieghbor is overfitting or underfitting the training data. The main way to fix underfitting is try using a more complex model and how it proforms. The problem can also be the data is too small and one way to help simulate more date is it use the k-fold cross-validation, it spilt the data into nonoverlapping subsets. The model can now traing for every subsets and all the different score for each subsets put together give a value for its preformance.
Resume: Fine-Tune Your Model¶
After training the models, we have to fine tune the. For finetuning the model, you can manually try to find a great combination for the hyperparameter. Or use the GridSearchCV if you have a few hyperparameter or the RandomizedSearchCV for testing alot of hyperparameter.
Another way to fine-tune your system is combine the models that performs best as a group. This mothod is called the Ensemble. It will often preform better the individual models.
You may need to look into your best models and understand their errors. It can be some of the features don't make sense to have, so maybe you want to drop them. Or maybe it is because you are missing some features and need more data. May need to look into outliers.
When you system if preforming well on the train set, it may be time to try evaluating your system on the test set. After testing you will need to evalute the scores. The system may proferm really good on the test set or not. If it doesn't you may need to go back and finetune even more. If the system preform very well, then may need to make it ready for launch.
Resume: Launch, Monitor, and Maintain Your System¶
If your system came to this step, you will need to get it ready for production. For a system to go into it need to be launch, monitor and maintain. This can be done differenly ways. First way is that you can create documentation and conduct tests and integrate it. The alternative approach is using a model on cloud platform.
It is not the last step to just deploying it. You will have to collect new fresh data and keep training the model and evaluating performance. It is import to catch potiontial issuses early. It is also a good idea to have a backup of the models in a database. The models can end up being corrupted.
Resume: Try It Out!.¶
Now you have read all the a both segment, you will have a understanding of what machine learning project look like. You can now see how much goes into making a grate system. The machine learning algorigthms are important, however it is preferable to be comfortable with the hole process.
SWMAL Opgave
Dataanalyse¶
Denne opgave er skrevet pƄ dansk efter aftale med Carsten
Qa) Beskrivelse af datasæt til O4 projekt¶
I forbindelse med vores O4 projekt har vi valgt at arbejde med image recognition. Vi vil udvikle en model, som kan analysere og identificere de forskellige vejrforhold pƄ billeder. Dette kan vƦre Cloudy, Rainy, Sunny eller Sunrise. For at opnƄ dette mƄl vil vi at bruge en klassifikationsmetode, hvor billeder kategoriseres, sƄ vores model kan trƦnes til at forudsige vejret, nƄr det prƦsenteres med et nyt billede.
Vi stÄr over for en multi-class image classification-opgave, da der er fire forskellige vejrtilstande, som skal genkendes. Denne opgave vil blive løst ved hjælp af supervised learning, hvor vi træner vores model vha. et datasæt
For at lave denne model har vi hentet data fra kaggle, som er en platform, der udbyder store dataset. Vi har et datasƦt som bestƄr af ca. 1200 billeder, opdelt i de 4 kategorier. Der vil vƦre ca. 300 samples pr. kategori til at trƦne/teste vores model. Disse samples bestƄr af forskellige billeder, som hver isƦr illustrerer en af de fire kategorier af vejr, Sunny, Rainy, Cloudy og Sunrise, som vil vƦre vores target vƦrdier.
DatasƦttet vil derfor blive opdelt i tre sƦt: trƦningssƦt til at trƦne modellen, valideringssƦt til at finjustere modelparametre og teste ydeevnen, og testsƦt til endelig evaluering af modellen.
NƄr vi arbejder med image recognition, kan det vƦre lidt svƦrere at identificere fejl i dataene. Der kan forekomme eventuelle fejl og usikkerheder i form af belysning, synsvinkler eller andre faktorer, som vil pƄvirke billedernes kvalitet. Det kan ogsƄ vƦre, at et af billederne er havnet i den forkerte kategori i trƦningssƦttet, altsƄ er der fejl i labels. PƄ denne mƄde vil vores model ikke trƦnes korrekt til at predict vores targetvƦrdier. Der vil blive lavet en dataanalyse af vores datasƦt for at identificere korrupte data.
Dette projekt sigter altsƄ mod at udvikle en image recognition-model til at identificere vejrforhold pƄ billeder ved hjƦlp af supervised learning.
Qb) Dataanalyse af eget datasæt¶
Her opsƦttes dataanalysen, som forklares i slutningen af denne opgave.
#make function to fetch dataset
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
def getDataSet(categories: list, folder: str):
    """Load and preprocess the weather images.

    For every category, the JPEG images in <folder>/<category> are read,
    converted to RGB, resized to 128x128 and normalized to [0, 1].  The label
    of an image is the index of its category in `categories`.  Files that
    cannot be decoded are reported and skipped.

    Returns (images, labels) as parallel Python lists.
    """
    images = []
    labels = []
    for category in categories:
        folder_path = os.path.join(folder, category)
        label = categories.index(category)  # numerical label = position in `categories`
        for filename in os.listdir(folder_path):
            # case-insensitive check, so .JPG/.jpeg files are not silently skipped
            if filename.lower().endswith((".jpg", ".jpeg")):
                img_path = os.path.join(folder_path, filename)
                img = cv2.imread(img_path)
                if img is not None:
                    # BUG FIX: cv2.imread returns BGR; convert to RGB so that
                    # plt.imshow later displays the true colors
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                    img = cv2.resize(img, (128, 128))  # resize to a common size
                    img = img / 255.0  # normalize pixel values to [0, 1]
                    images.append(img)
                    labels.append(label)
                else:
                    print(f"Failed to load image: {img_path}")
    return images, labels
Der er implementeret en funktion getDataSet(), som tager en liste med kategorier og en string, som er navnet pƄ mappen. Den vil give hvert image et label baseret pƄ kategori, og index. Derudover vil den prƦprocesserer billederne, og sorterer de korrupte filer fra.
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
# List the working directory to verify the dataset folder is present
! ls
# ! dir
data_folder = "weather_dataset"
categories = ["Shine", "Cloudy", "Rain", "Sunrise"]
images, labels = getDataSet(categories, data_folder)
# Convert the lists to NumPy arrays
images = np.array(images)
labels = np.array(labels)
# Assuming 'labels' is a list or NumPy array of category labels
# You can create histograms of the labels to visualize the class distribution
plt.hist(labels, bins=len(np.unique(labels))) # Use as many bins as unique labels
plt.xlabel('Category')
plt.ylabel('Count')
plt.title('Class Distribution')
plt.xticks(np.unique(labels)) # Set x-ticks to unique class labels
plt.show()
# Assuming 'images' is a NumPy array containing the image data
# You can create a histogram of pixel values for one image (e.g., the first image)
print(f"Category for image 0: {categories[labels[0]]}")
plt.imshow(images[0])
plt.axis('off') # Optional: Turn off the axis labels and ticks
plt.title(categories[labels[0]])
plt.show()
# Pixel-value histogram of the first image (values were normalized to [0, 1])
plt.hist(images[0].ravel(), bins=256, range=(0, 1)) # Assuming normalized pixel values in [0, 1]
plt.xlabel('Pixel Value')
plt.ylabel('Frequency')
plt.title('Pixel Value Histogram')
plt.show()
Data dataanalyse.ipynb pipelines.ipynb weather_dataset Failed to load image: weather_dataset/Shine/shine131.jpg Failed to load image: weather_dataset/Rain/rain141.jpg
Category for image 0: Shine
Der er lavet to histrogrammer. Et til at give en oversigt over hvor meget data, der er til hver kategori, og et til at vise pixel feature pƄ et billede.
Efter frafiltreringen af de billeder der ikke kunne loades ses det pƄ histogrammet, at der er mellem 200-350 billeder inden for hver kategori. Dette vurderer vi til at vƦre en tilstrƦkkelig mƦngde af hver kategori til at kunne trƦne en billedgenkender og stadig have 100 billeder tilovers til testing.
Der er valgt at lave et histogram for image[0], som hører under kategorien "Shine". PÄ dette histogram kan vi se hvordan frekvens af pixels pÄ billedet er fordelt ud over pixel value 0-1. Pixel value viser hvor mørke eller lyse farverne pÄ pixelsne er, hvor 0 er helt mørkt og 1 er helt hvidt.
PÄ histogrammet ser vi, at der er størst frekvens ved Pixel value 1, samt at frekvensen stiger mere value. Dette giver god mening, da billedet skal repræsentere Shine, som gerne skulle være meget lyst. Histogrammet stemmer derfor overens med billedet.
SWMAL Exercise¶
Pipelines¶
$$
\newcommand\rem[1]{}
\rem{SWMAL: CEF def and LaTeX commands, remember: no newlines in defs}
\newcommand\eq[2]{#1 &=& #2\\}
\newcommand\ar[2]{\begin{array}{#1}#2\end{array}}
\newcommand\ac[2]{\left[\ar{#1}{#2}\right]}
\newcommand\st[1]{_{\scriptsize #1}}
\newcommand\norm[1]{{\cal L}_{#1}}
\newcommand\obs[2]{#1_{\text{\scriptsize obs}}^{\left(#2\right)}}
\newcommand\diff[1]{\text{d}#1}
\newcommand\pown[1]{^{(#1)}}
\def\pownn{\pown{n}}
\def\powni{\pown{i}}
\def\powtest{\pown{\text{\scriptsize test}}}
\def\powtrain{\pown{\text{\scriptsize train}}}
\def\bX{\mathbf{M}}
\def\bX{\mathbf{X}}
\def\bZ{\mathbf{Z}}
\def\bw{\mathbf{m}}
\def\bx{\mathbf{x}}
\def\by{\mathbf{y}}
\def\bz{\mathbf{z}}
\def\bw{\mathbf{w}}
\def\btheta{{\boldsymbol\theta}}
\def\bSigma{{\boldsymbol\Sigma}}
\def\half{\frac{1}{2}}
$$
We are going to implement a min/max scaler for a multilayer perceptron (MLP). The MLP is designed to receive input data within specific ranges: [0;1] or [-1;1]. The min/max scaler will adjust the data accordingly, ensuring that it falls within the expected range and preventing saturation to the minimum or maximum values.
%matplotlib inline
import sys
import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
def LoadDataFromL01(filename="Data/itmal_l01_data.pkl"):
    """Load the (X, y) tuple pickled in lesson 01.

    Parameters:
        filename: path to the pickle file (defaults to the lesson-01 data file,
                  making the call backward compatible).
    Returns:
        (X, y) exactly as stored in the pickle.
    """
    # BUG FIX: the file was previously opened with a literal placeholder string
    # instead of the configured filename, so the data file was never opened.
    with open(filename, "rb") as f:
        (X, y) = pickle.load(f)
    return X, y
X, y = LoadDataFromL01()
print(f"X.shape={X.shape}, y.shape={y.shape}")
# Sanity checks on the lesson-01 GDP/life-satisfaction data
assert X.shape[0] == y.shape[0]
assert X.ndim == 2
assert y.ndim == 1 # did a y.ravel() before saving to picke file
assert X.shape[0] == 29
# re-create plot data (not stored in the Pickel file)
m = np.linspace(0, 60000, 1000)
M = np.empty([m.shape[0], 1])
M[:, 0] = m
print("OK")
X.shape=(29, 1), y.shape=(29,) OK
# Setup the MLP and lin. regression again..
def isNumpyData(t: np.ndarray, expected_ndim: int) -> bool:
    """Check that t is a numpy array with exactly expected_ndim dimensions.

    Prints a diagnostic to stderr and returns False when the check fails;
    misuse of expected_ndim itself raises an AssertionError.
    """
    assert isinstance(expected_ndim, int), f"input parameter 'expected_ndim' is not an integer but a '{type(expected_ndim)}'"
    assert expected_ndim>=0, f"expected input parameter 'expected_ndim' to be >=0, got {expected_ndim}"
    if t is None:
        print("input parameter 't' is None", file=sys.stderr)
        return False
    if not isinstance(t, np.ndarray):
        # BUG FIX: the next two messages were missing the f-string prefix, so
        # the {placeholders} were printed literally instead of the real values.
        print(f"expected numpy.ndarray got type '{type(t)}'", file=sys.stderr)
        return False
    if not t.ndim==expected_ndim:
        print(f"expected ndim={expected_ndim} but found {t.ndim}", file=sys.stderr)
        return False
    return True
def PlotModels(model1, model2, X: np.ndarray, y: np.ndarray, name_model1: str, name_model2: str):
    """Plot the predictions of two fitted models on (X, y) and print their R^2 scores."""
    # NOTE: local function is such a nifty feature of Python!
    def CalcPredAndScore(model, X: np.ndarray, y: np.ndarray,):
        # Predict with an already-fitted model and compute its R^2 score on (X, y)
        assert isNumpyData(X, 2) and isNumpyData(y, 1) and X.shape[0]==y.shape[0]
        y_pred_model = model.predict(X)
        score_model = r2_score(y, y_pred_model) # call r2
        return y_pred_model, score_model
    assert isinstance(name_model1, str) and isinstance(name_model2, str)
    y_pred_model1, score_model1 = CalcPredAndScore(model1, X, y)
    y_pred_model2, score_model2 = CalcPredAndScore(model2, X, y)
    # Red dots/line for model1, black crosses for model2, raw data as scatter
    plt.plot(X, y_pred_model1, "r.-")
    plt.plot(X, y_pred_model2, "kx-")
    plt.scatter(X, y)
    plt.xlabel("GDP per capita")
    plt.ylabel("Life satisfaction")
    plt.legend([name_model1, name_model2, "X OECD data"])
    # Right-align the model names so the two score lines line up in the output
    l = max(len(name_model1), len(name_model2))
    print(f"{(name_model1).rjust(l)}.score(X, y)={score_model1:0.2f}")
    print(f"{(name_model2).rjust(l)}.score(X, y)={score_model2:0.2f}")
# lets make a linear and MLP regressor and redo the plots
mlp = MLPRegressor(hidden_layer_sizes=(10, ),
                   solver='adam',
                   activation='relu',
                   tol=1E-5,
                   max_iter=100000,
                   verbose=False)
linreg = LinearRegression()
# Both models are fitted on the UN-scaled data here; the MLP often mis-fits
mlp.fit(X, y)
linreg.fit(X, y)
print("The MLP may mis-fit the data, seen in the, sometimes, bad R^2 score..\n")
PlotModels(linreg, mlp, X, y, "lin.reg", "MLP")
print("\nOK")
The MLP may mis-fit the data, seen in the, sometimes, bad R^2 score..
lin.reg.score(X, y)=0.73
MLP.score(X, y)=-48656.97
OK
Qa) Create a Min/max scaler for the MLP¶
This first version we are creating is the most simplified solution. Here we take the max and min value from X and use them to rescale the data linearly to the range [0;1] (min/max normalization):
$$ \ar{rl}{ \text{x'} &= \frac{\text{x}-x_{\min}}{x_{\max}-x_{\min}} } $$
Now we can re-train the MLP and re-plot with the preprocess data.
# Manual min/max normalization: map X linearly onto [0; 1]
maxvalue= X.max()
print(f"maxvalue={maxvalue}")
minvalue= X.min()
print(f"minvalue={minvalue}")
X_scaled = (X-minvalue)/(maxvalue-minvalue)
# Re-train both models on the scaled data; the MLP no longer saturates
mlp.fit(X_scaled, y)
linreg.fit(X_scaled, y)
print("The MLP may mis-fit the data, seen in the, sometimes, bad R^2 score..\n")
PlotModels(linreg, mlp, X_scaled, y, "lin.reg", "MLP")
print("\nOK")
maxvalue=55805.204000000005
minvalue=9054.914
The MLP may mis-fit the data, seen in the, sometimes, bad R^2 score..
lin.reg.score(X, y)=0.73
MLP.score(X, y)=0.73
OK
Qb) Scikit-learn Pipelines¶
Now, we will perform the re-scaling again, this time using a pipeline and a scaler from sklearn. We will be utilizing sklearn.pipeline.Pipeline and sklearn.preprocessing.MinMaxScaler. The pipeline acts as an intelligent composite constructor, which reduces the number of variables involved. This is achieved by directly passing the scaled values into the model we intend to train. This is done by calling the pipe.fit().
Again we are using the PlotModels() function to retraing the linear regressor on the scaled data.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Pipeline: scale to [0;1] with MinMaxScaler, then fit the MLP on the scaled data
pipe = Pipeline(
    [
        ('Scaler', MinMaxScaler()),
        ('mlp',mlp)
    ]
)
pipe.fit(X, y)
linreg.fit(X, y)
PlotModels(linreg, pipe, X, y, "lin.reg", "MLP")
lin.reg.score(X, y)=0.73
MLP.score(X, y)=0.72
Qc) Outliers and the Min-max Scaler vs. the Standard Scaler¶
The fundamental problem with the min/max scaler is that it linearly transforms the data to fit within a specified range, [0;1] or [-1;1]. This means the min and max values are often set by outliers, which can distort the overall distribution. A single outlier with an extremely low value will make the min/max scaler stretch to fit that one point, so the rest of the data is squeezed into a narrower range — potentially losing important information.
On the other hand we have the standard scaler, which is less sensitive to outliers. This is because the standard scaler performs z-score normalization, a concept from statistics. In other words, this process centers the data around 0 and scales it based on its standard deviation, so a single extreme value has far less influence on the scaling of the remaining points.
from sklearn.preprocessing import StandardScaler
# Same pipeline, but with z-score standardization (less sensitive to outliers)
pipe = Pipeline([('Scaler', StandardScaler()), ('mlp',mlp)])
pipe.fit(X, y)
linreg.fit(X, y)
PlotModels(linreg, pipe, X, y, "lin.reg", "MLP")
lin.reg.score(X, y)=0.73
MLP.score(X, y)=0.80
Qd) Modify the MLP Hyperparameters¶
Now we want to try changing the different hyperparameters associated with the MLP. We are going to try changing the number of neurons, the solver and the activation function. We will use the $R^2$ score to check whether the output is still sensible.
Firstly try new amount of neurons, we are chaning from 10 to 5.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Hyperparameter experiment: shrink the hidden layer from 10 to 5 neurons
mlp = MLPRegressor(hidden_layer_sizes=(5, ),
                   solver='adam',
                   activation='relu',
                   tol=1E-5,
                   max_iter=100000,
                   verbose=False)
pipe = Pipeline(
    [
        ('Scaler', MinMaxScaler()),
        ('mlp',mlp)
    ]
)
pipe.fit(X, y)
linreg.fit(X, y)
PlotModels(linreg, pipe, X, y, "lin.reg", "MLP")
lin.reg.score(X, y)=0.73
MLP.score(X, y)=0.73
We can observe that the MLP still performs fine with 5 neurons. So we will now test with only one neuron.
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
# Extreme case: a single hidden neuron.
mlp = MLPRegressor(
    hidden_layer_sizes=(1,),
    solver='adam',
    activation='relu',
    tol=1E-5,
    max_iter=100000,
    verbose=False,
)
pipe = Pipeline([('Scaler', MinMaxScaler()), ('mlp', mlp)])
linreg.fit(X, y)
pipe.fit(X, y)
PlotModels(linreg, pipe, X, y, "lin.reg", "MLP")
lin.reg.score(X, y)=0.73
MLP.score(X, y)=0.70
The $R^2$ is only a little amount worse than the linear regression.
Now we are going to try a new activation function. An activation function is a mathematical operation applied to the output of a neuron; it introduces non-linearity into the network, which allows the neural network to learn and model complex relationships between input and output. We use the logistic, also called sigmoid, function: $$ f(x) = \frac{1}{1+e^{-x}} $$
Its output range is $[0;1]$.
# Same architecture, but with the logistic (sigmoid) activation function.
mlp = MLPRegressor(
    hidden_layer_sizes=(10,),
    solver='adam',
    activation='logistic',
    tol=1E-5,
    max_iter=100000,
    verbose=False,
)
pipe = Pipeline([('Scaler', MinMaxScaler()), ('mlp', mlp)])
linreg.fit(X, y)
pipe.fit(X, y)
PlotModels(linreg, pipe, X, y, "lin.reg", "MLP")
lin.reg.score(X, y)=0.73
MLP.score(X, y)=0.72
Changing the activation function to logistic, the $R^2$ is only slightly worse than the linear regression. You can observe that the shape is non-linear.
Now we want to try a different solver. We use stochastic gradient descent (sgd) instead of adam. This is an algorithm often used in machine learning; it is a variant of the standard gradient descent algorithm.
# Swap the optimizer: plain stochastic gradient descent instead of adam.
mlp = MLPRegressor(
    hidden_layer_sizes=(10,),
    solver='sgd',
    activation='relu',
    tol=1E-5,
    max_iter=100000,
    verbose=False,
)
pipe = Pipeline([('Scaler', MinMaxScaler()), ('mlp', mlp)])
linreg.fit(X, y)
pipe.fit(X, y)
PlotModels(linreg, pipe, X, y, "lin.reg", "MLP")
lin.reg.score(X, y)=0.73
MLP.score(X, y)=0.50
SWMAL Exercise¶
Training a Linear Regressor I¶
The goal of the linear regression is to find the argument $w$ that minimizes the sum-of-squares error over all inputs.
$$ \def\rem#1{} \rem{ITMAL: CEF def and LaTeX commands, remember: no newlines in defs} \def\eq#1#2{#1 &=& #2\\} \def\ar#1#2{\begin{array}{#1}#2\end{array}} \def\ac#1#2{\left[\ar{#1}{#2}\right]} \def\st#1{_{\textrm{\scriptsize #1}}} \def\norm#1{{\cal L}_{#1}} \def\obs#1#2{#1_{\textrm{\scriptsize obs}}^{\left(#2\right)}} \def\diff#1{\mathrm{d}#1} \def\pown#1{^{(#1)}} \def\pownn{\pown{n}} \def\powni{\pown{i}} \def\powtest{\pown{\textrm{\scriptsize test}}} \def\powtrain{\pown{\textrm{\scriptsize train}}} \def\bX{\mathbf{M}} \def\bX{\mathbf{X}} \def\bZ{\mathbf{Z}} \def\bw{\mathbf{m}} \def\bx{\mathbf{x}} \def\by{\mathbf{y}} \def\bz{\mathbf{z}} \def\bw{\mathbf{w}} \def\btheta{{\boldsymbol\theta}} \def\bSigma{{\boldsymbol\Sigma}} \def\half{\frac{1}{2}} \def\pfrac#1#2{\frac{\partial~#1}{\partial~#2}} \def\dfrac#1#2{\frac{\mathrm{d}~#1}{\mathrm{d}#2}} \bw^* ~=~ \left( \bX^\top \bX \right)^{-1} \bX^\top \by $$
Qa Write a Python function that uses the closed-form to find $\bw^*$¶
We are going to make a function GetTheNormalEquation(X, y). The function takes a training dataset, where we have the matrixX and the vector of the label y and returns the w via the closed-form.
The function is going to to use the normal equation as presented below:
$$ \bw^* ~=~ \left( \bX^\top \bX \right)^{-1} \bX^\top \by $$
Inside the function we make a variable called X_b which is the concatenate column of ones to X. In other words it adds the bias term.
Use the test data, X and y in the code below to find w via the closed-form. Use the test vectors for w to test your implementation, and remember to add the bias term (concat an all-one vector to X before solving).
The next line where we store variable w we simply use the normal equation. It constists of multiple parts. The np.linalg.inv() calculates the inverse of the matrix obtained from the
$$
\left( \bX^\top \bX \right)^{-1}
$$.
The product of np.linalg.inv() is multiplied with the rest, which is the transpose of the input data matrix with the target output vector. It results in a vector of size (d+1).
$$ \bX^\top \by $$
import sys,os
sys.path.append(os.path.expanduser('../'))
import numpy as np
from libitmal import utils as itmalutils
def GetOS():
    """Return the shell directory-listing command for the current OS.

    BUG FIX: the original returned the bare names ``dir``/``ls``:
    ``dir`` silently resolved to the Python builtin function and ``ls``
    raised NameError. Return the command strings instead.
    """
    return 'dir' if os.name == 'nt' else 'ls'
# The Normal Equation p.134
def GetTheNormalEquation(X, y):
    """Solve linear regression in closed form: w* = (X^T X)^{-1} X^T y.

    A bias column of ones is prepended to X, so the returned weight
    vector has d+1 entries (intercept first).
    """
    bias_col = np.ones((X.shape[0], 1))
    A = np.hstack([bias_col, X])          # design matrix with bias term
    gram_inv = np.linalg.inv(A.T @ A)     # (X^T X)^{-1}
    return gram_inv @ A.T @ y
def GenerateData():
    """Return the fixed 4-sample training fixture (X, y) for the test vector."""
    features = np.array(
        [8.34044009e-01, 1.44064899e+00, 2.28749635e-04, 6.04665145e-01]
    ).reshape(-1, 1)
    targets = np.array([5.97396028, 7.24897834, 4.86609388, 3.51245674])
    return features, targets
# Sanity check: solve the normal equation on the fixture data and compare
# against a known-good weight vector (intercept, slope).
X, y = GenerateData()
w = GetTheNormalEquation(X, y)
# TEST VECTOR:
w_expected = np.array([4.046879011698, 1.880121487278])
itmalutils.PrintMatrix(w, label="w=", precision=12)
itmalutils.AssertInRange(w, w_expected, eps=1E-9)
print("OK")
w=[4.046879011698 1.880121487278] OK
Qb Find the limits of the least-square method¶
Calculating a matrix inverse can be computationally expensive. This is especially the case for large or nearly singular matrices. A nearly singular matrix is one that is almost singular, meaning it does not have a well-conditioned inverse.
In the code below we have a function GenerateData(M, N). When the parameter M is set to 10000 instead of 1000, the resulting matrix becomes (nearly) singular and cannot be handled by GetTheNormalEquation(X, y).
The reason it takes such a long time is the computational complexity of matrix inversion, roughly between $O(N^{2.4})$ and $O(N^3)$: doubling the number of features multiplies the computation time by roughly $2^{2.4} \approx 5.3$.
from sklearn.linear_model import LinearRegression
def GenerateData(M, N):
    """Generate an M x N feature matrix and M-vector of targets for the
    least-squares limit experiment (test-data style taken from [HOML]).

    Parameters:
        M : int > 0, number of samples (rows)
        N : int > 0, number of features (columns)
    Returns:
        X : (M, N) matrix; first column is i*4, first row is -j*4, rest 2.0
        y : (M,) noisy linear targets derived from the first column of X
    """
    # BUG FIX: the progress message printed N for both parameters
    # (f'...M={N}, N={N}...'); it now reports the actual M.
    print(f'GenerateData(M={M}, N={N})...')
    assert M > 0
    assert N > 0
    assert isinstance(M, int)
    assert isinstance(N, int)
    # NOTE: not always possible to invert a random matrix;
    # it becomes singular, hence a more elaborate choice
    # of values below (but still a hack):
    X = 2 * np.ones([M, N])
    for i in range(X.shape[0]):
        X[i, 0] = i * 4
    for j in range(X.shape[1]):
        X[0, j] = -j * 4
    y = 4 + 3 * X + np.random.randn(M, 1)
    y = y[:, 0]  # keep only the first target column (could do better here!)
    return X, y
# Deliberately large problem: inverting a 10000-sample design matrix via the
# closed-form normal equation is slow and numerically fragile at this size.
X, y = GenerateData(M=10000, N=20)
w = GetTheNormalEquation(X, y)
# Print w
itmalutils.PrintMatrix(w, label="w=", precision=12)
print("OK")
GenerateData(M=20, N=20)...
w=[ 1666387.5502728098 2.440137876938 364127.55597364274
-1027466.0868604467 -355721.375006271 54929.827803658765
-379931.0885377476 82890.96006455205 -260862.72574724632
10940.279039915009 -169174.47809120276 433595.0309961927
-130711.1726267726 275061.5205127626 -50454.66184441206
-149800.39751745152 107994.28902418479 91549.58807911839
4787.796794080489 37335.08970069502 -43486.08680883595 ]
OK
When increasing w, the following error occurs: LinAlgError: Singular matrix
SWMAL Exercise¶
Gradient Descent Methods and Training¶
$$ \def\rem#1{} \rem{ITMAL: CEF def and LaTeX commands, remember: no newlines in defs} \def\eq#1#2{#1 &=& #2\\} \def\ar#1#2{\begin{array}{#1}#2\end{array}} \def\ac#1#2{\left[\ar{#1}{#2}\right]} \def\st#1{_{\textrm{\scriptsize #1}}} \def\norm#1{{\cal L}_{#1}} \def\obs#1#2{#1_{\textrm{\scriptsize obs}}^{\left(#2\right)}} \def\diff#1{\mathrm{d}#1} \def\pown#1{^{(#1)}} \def\pownn{\pown{n}} \def\powni{\pown{i}} \def\powtest{\pown{\textrm{\scriptsize test}}} \def\powtrain{\pown{\textrm{\scriptsize train}}} \def\bX{\mathbf{M}} \def\bX{\mathbf{X}} \def\bZ{\mathbf{Z}} \def\bw{\mathbf{m}} \def\bx{\mathbf{x}} \def\by{\mathbf{y}} \def\bz{\mathbf{z}} \def\bw{\mathbf{w}} \def\btheta{{\boldsymbol\theta}} \def\bSigma{{\boldsymbol\Sigma}} \def\half{\frac{1}{2}} \def\pfrac#1#2{\frac{\partial~#1}{\partial~#2}} \def\dfrac#1#2{\frac{\mathrm{d}~#1}{\mathrm{d}#2}} $$
Qa The Gradient Descent Method (GD)¶
Gradient descent is a generic optimization algorithm capable of finding optimal solutions to a wide range of problems. Which means it is an iterative optimization algorithm used to find the minimum of a function. The function used in this case is the cost function J(w). The idea is based on taking steps proportional to the negative of the gradient of the function at the current point
The code below firstly imports all the libraries. Secondly theres is created a new function GenerateData() and the function returns 3 values X, X_b, y. GenerateData() generates random data for a linear regression where X represents a matrix of input features and y represents the target values. And atlast we have the X_b and it is the augmented feature matrix with the bias term.
The eta is the learning rate, which is the steps the algorithm takes towards finding the minimum of the cost function. If eta is too large, the algorithm may overshoot the minimum, which can potentially causing it to keep bouncing around near the minimum without settling down or it never reach a stable minimum and keeps running indefinitely. Conversely, if eta is too small, the algorithm may take a very long time to it never reach a stable minimum and keeps running indefinitely or get stuck in a local minimum.
The n_iterations is the number of epochs also called iterations.
The m is the number of samples
The theta is the intial random initializtaion
Inside the forloop we calculate the compute gradients of MSE with respect to theta and then we update the using the gradients and learning rate. This is repeated for the number of iterations.
See the pictures below on the left the learning rate is too low, and will eventually reach the solution, however it takes a long time. The middle the learning rate is looks pretty good, because we can see in a few iterations it has already found a good solution. On the rigth the learning rate is too high, which mean the algorithm diverges. The algorithm is getting further and futher away from the solution.
# NOTE: modified code from [GITHOML], 04_training_linear_models.ipynb
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
def GenerateData():
    """Generate a noisy linear dataset y = 4 + 3x + noise.

    Returns:
        X   : (100, 1) raw feature matrix
        X_b : (100, 2) feature matrix with a bias column of ones prepended
        y   : (100, 1) targets
    """
    X = 2 * np.random.rand(100, 1)
    y = 4 + 3 * X + np.random.randn(100, 1)
    X_b = np.c_[np.ones((100, 1)), X]  # add x0 = 1 to each instance
    return X, X_b, y

X, X_b, y = GenerateData()

eta = 0.1            # learning rate
n_iterations = 1000  # num of epochs/iterations
m = 100              # num of samples

theta = np.random.randn(2, 1)  # random initialization
for iteration in range(n_iterations):
    # Compute gradients of MSE with respect to theta, using the FULL batch
    gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
    # Update theta using the gradients and learning rate
    theta = theta - eta * gradients

# BUG FIX: the label wrongly said 'stochastic'; this loop is BATCH gradient
# descent (every iteration uses the entire training set).
print(f'batch gradient descent theta={theta.ravel()}')
##########################################################
# rest of the code is just for plotting, needs no review
def plot_gradient_descent(theta, eta, theta_path=None):
    """Run 1000 batch-GD iterations from `theta` with learning rate `eta`,
    drawing the first 10 prediction lines on top of the data points.

    Depends on module-level X, y, X_b, X_new, X_new_b.
    Appends every intermediate theta to `theta_path` if one is given.
    """
    m = len(X_b)
    plt.plot(X, y, "b.")
    n_iterations = 1000
    for iteration in range(n_iterations):
        if iteration < 10:
            # Show how the fitted line evolves over the first iterations
            y_predict = X_new_b.dot(theta)
            style = "b-" if iteration > 0 else "r--"  # red dashed = initial guess
            plt.plot(X_new, y_predict, style)
        gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
        theta = theta - eta * gradients
        if theta_path is not None:
            theta_path.append(theta)
    plt.xlabel("$x_1$", fontsize=18)
    plt.axis([0, 2, 0, 15])
    plt.title(r"$\eta = {}$".format(eta), fontsize=16)

np.random.seed(42)
theta_path_bgd = []
theta = np.random.randn(2,1) # random initialization
X_new = np.array([[0], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance

# Three side-by-side runs: learning rate too low, about right, too high.
plt.figure(figsize=(10,4))
plt.subplot(131); plot_gradient_descent(theta, eta=0.02)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(132); plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd)
plt.subplot(133); plot_gradient_descent(theta, eta=0.5)
plt.show()
print('OK')
stochastic gradient descent theta=[4.14149307 2.81305367]
OK
Qb The Stochastic Gradient Descent Method (SGD)¶
Stochastic Gradient Descent is a variant of Gradient Descent. The main problem with Gradient Descent is that it uses the whole training set at every step, which is really slow; SGD is the opposite. SGD picks a random instance from the training set at every step and computes the gradient based only on that single instance. The drawback of SGD is that, while it will over time get very close to the minimum, it will continue to bounce around and never settle down.
The np.random.randint(m) generate a random integer between [0;m). In the context m is the total number of data points in the traning set.
The main differens in the code is the extra forloop in the SGD compared to the GD. This extra forloop iteraters over all elements in the X_b. Another distinction lies in how the gradients are calculated. In GD, they are divided by m. These two reasons are connected since in SGD, we compute gradients each time we go through the for loop.
The theta is calculated the same way, however the gradint is changed
for epoch in range(n_epochs):
for i in range(m):
.
.
.
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
eta = ...
theta = ...
where it for the GD method was just
for iteration in range(n_iterations):
gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
theta = ..
# NOTE: code from [GITHOML], 04_training_linear_models.ipynb
theta_path_sgd = []
m = len(X_b)
np.random.seed(42)

n_epochs = 50
t0, t1 = 5, 50 # learning schedule hyperparameters

def learning_schedule(t):
    # Adaptive learning rate: starts around t0/t1 and decays roughly as 1/t.
    return t0 / (t + t1)

theta = np.random.randn(2,1) # random initialization

for epoch in range(n_epochs):
    for i in range(m):
        if epoch == 0 and i < 20:
            # Visualize the first 20 prediction lines of the first epoch
            y_predict = X_new_b.dot(theta)
            style = "b-" if i > 0 else "r--"
            plt.plot(X_new, y_predict, style)
        # Gradient from a single randomly-drawn instance (the "stochastic" part);
        # note: no division by m, since only one sample contributes.
        random_index = np.random.randint(m)
        xi = X_b[random_index:random_index+1]
        yi = y[random_index:random_index+1]
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        eta = learning_schedule(epoch * m + i)
        theta = theta - eta * gradients
        theta_path_sgd.append(theta)

plt.plot(X, y, "b.")

# Compare against scikit-learn's own SGD regressor on the same data.
from sklearn.linear_model import SGDRegressor
#sgd_reg = SGDRegressor(max_iter=50, tol=-np.infty, penalty=None, eta0=0.1, random_state=42)
sgd_reg = SGDRegressor(max_iter=50, penalty=None, eta0=0.1, random_state=42)
sgd_reg.fit(X, y.ravel())

print(f'stochastic gradient descent theta={theta.ravel()}')
print(f'Scikit-learn SGDRegressor "thetas": sgd_reg.intercept_={sgd_reg.intercept_}, sgd_reg.coef_={sgd_reg.coef_}')

##########################################################
# rest of the code is just for plotting, needs no review
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([0, 2, 0, 15])
plt.show()
print('OK')
stochastic gradient descent theta=[4.13232694 2.79527929] Scikit-learn SGDRegressor "thetas": sgd_reg.intercept_=[4.09914528], sgd_reg.coef_=[2.87331758]
OK
Qc Adaptive learning rate for $\eta$¶
The learning_schedule() function is part of the SGD; it takes one parameter and implements an adaptive learning rate, defining how the learning rate ($\eta$) decays. The objective is to start with large steps and later reduce the step size, in order to escape local minima early on and eventually settle in the global minimum. The input t is the current iteration number. t0 sets the size of the initial steps, and t1 determines how fast the learning rate decreases; a larger t1 makes the learning rate decrease more slowly over time.
Another thing you can do with the SGDRegressor is set the learning rate to constant or adative in the hyperparameter.
SGDRegressor(max_iter=1,
eta0=0.0005,
learning_rate="constant", # or 'adaptive' etc.
random_state=42)
Qd Mini-batch Gradient Descent Method¶
The mini-batch gradient descent combines the best aspects of SGD and GD. Mini-batch GD starts by taking random subsets, like SGD, which helps it escape local minima. When it is near the global minimum it behaves more like GD, taking steps proportional to the negative of the gradient of the function at the current point.
The main advantage of mini-batch GD over SGD is the performance boost from optimized matrix operations.
The main difference is that mini-batch GD iterates through the data in mini-batches of 20 samples at a time, whereas GD processes the entire dataset at once and SGD processes only one data point at a time. The gradient computation is now divided by the size of the mini-batch. As with SGD, we use a learning schedule that decreases the learning rate as the number of iterations increases. theta_path_mgd appends the theta from each training step.
# NOTE: code from [GITHOML], 04_training_linear_models.ipynb
import numpy as np

theta_path_mgd = []

n_iterations = 50
minibatch_size = 20

np.random.seed(42)
theta = np.random.randn(2,1) # random initialization

t0, t1 = 200, 1000
def learning_schedule(t):
    # Adaptive learning rate, decaying with the total mini-batch count t.
    return t0 / (t + t1)

t = 0
for epoch in range(n_iterations):
    # Reshuffle the training set each epoch so mini-batches differ between epochs
    shuffled_indices = np.random.permutation(m)
    X_b_shuffled = X_b[shuffled_indices]
    y_shuffled = y[shuffled_indices]
    for i in range(0, m, minibatch_size):
        t += 1
        xi = X_b_shuffled[i:i+minibatch_size]
        yi = y_shuffled[i:i+minibatch_size]
        # Average gradient over the mini-batch (cf. /m for full-batch GD)
        gradients = 2/minibatch_size * xi.T.dot(xi.dot(theta) - yi)
        eta = learning_schedule(t)
        theta = theta - eta * gradients
        theta_path_mgd.append(theta)

print(f'mini-batch theta={theta.ravel()}')
print('OK')
mini-batch theta=[4.04995397 2.7175486 ] OK
Qe Choosing a Gradient Descent Method¶
GD is good at getting to the minimum, however it can get stuck in a local minimum (if one exists). In the picture below there is a lot of blue computation, which is expensive on large datasets.
SGD converges fast due to its frequent parameter updates and suits large datasets, as it only processes one data point at a time. Its problem is that it will over time get very close to the minimum, but will keep bouncing around and never settle down.
Mini-batch GD is a compromise between GD and SGD. Its only drawback is that it requires tuning the batch size as a hyperparameter. The picture below illustrates a good use of mini-batch GD.
# NOTE: code from [GITHOML], 04_training_linear_models.ipynb
# Compare the parameter-space paths taken by the three GD variants.
theta_path_bgd = np.array(theta_path_bgd)
theta_path_sgd = np.array(theta_path_sgd)
theta_path_mgd = np.array(theta_path_mgd)

plt.figure(figsize=(7,4))
plt.plot(theta_path_sgd[:, 0], theta_path_sgd[:, 1], "r-s", linewidth=1, label="Stochastic")
plt.plot(theta_path_mgd[:, 0], theta_path_mgd[:, 1], "g-+", linewidth=2, label="Mini-batch")
plt.plot(theta_path_bgd[:, 0], theta_path_bgd[:, 1], "b-o", linewidth=3, label="Batch")
plt.legend(loc="upper left", fontsize=16)
plt.xlabel(r"$\theta_0$", fontsize=20)
plt.ylabel(r"$\theta_1$ ", fontsize=20, rotation=0)
plt.axis([2.5, 4.5, 2.3, 3.9])
plt.show()
print('OK')
OK
SWMAL Exercise¶
Artificial Neural Networks as Universal Approximators¶
In this exercise we will test the function of ANN as an universal approximator. We will use a simply ANN consisting of two neurons in a hidden layer.
We use the tanh activation function in the neuron. It can only approximate similar to a monotonic function. When we apply two neurons in a pair, they would be able to approximate an up-hill-then-downhill non monotonic function.
This is a function with a single maximum.
Notice the lack of a train-test split in the exercise; since we only want to look at the approximation capabilities of the MLP, the train-test split is omitted, (and you are welcome to do the split yourself, and also to add noise in the data generators.)
# One data generator just to test out the MLP..
# An MLP with just two neurons should be able to approximate this simple
# down-up graph using its two non-linear sigmoid or tanh neurons...
import matplotlib.pyplot as plt
import numpy as np
def GenerateSimpleData():
    """Generate a simple down-up ground-truth curve from two tanh terms.

    Returns:
        X : (100, 1) matrix of inputs on [-10, 10] (scikit-learn format)
        y : (100,) corresponding targets
    """
    X = np.linspace(-10, 10, 100)
    # BUG FIX: the original assigned y twice in a row; the first assignment
    # (2*tanh(2X-12) - 3*tanh(2X-4)) was dead code and has been removed.
    y = 2*np.tanh(2*X + 2) - 3*np.tanh(2*X - 4)
    X = X.reshape(-1, 1)  # Scikit-algorithms needs matrix in (:,1)-format
    return X, y
# Plot the ground-truth curve before fitting any model.
X, y_true = GenerateSimpleData()
plt.plot(X, y_true, "r-.")
plt.legend(["y_true"])
plt.xlabel("X")
plt.ylabel("y")
plt.title("ANN, Groundtruth data simple")
print("OK")
OK
Qa)¶
We will fit the data generator and MLP in the layer 2. Them y_true and y_pred are plotted in a graph
# Create MLP and fit model, just run..
from sklearn.neural_network import MLPRegressor

# A minimal network: one hidden layer with two tanh neurons is enough to
# approximate the single-bump ground-truth curve.
mlp = MLPRegressor(activation = 'tanh', # activation function
                   hidden_layer_sizes = [2], # layers and neurons per layer: one hidden layer with two neurons
                   alpha = 1e-5, # regularization parameter
                   solver = 'lbfgs', # quasi-Newton solver
                   max_iter=10000,
                   verbose = True)

mlp.fit(X, y_true)
y_pred = mlp.predict(X)

# Extract weights and biases
# coefs_[i] / intercepts_[i] hold the weight matrix and bias vector of layer i.
weights = mlp.coefs_
biases = mlp.intercepts_
print("weights:")
print(weights)
print("biases:")
print(biases)
print("OK")
weights:
[array([[-1.99985578, -1.99966008]]), array([[ 3.00008787],
[-2.00001444]])]
biases:
[array([ 3.9997559 , -1.99924065]), array([-3.0659848e-05])]
OK
import matplotlib.pyplot as plt
# Overlay the MLP prediction on the ground truth.
plt.plot(X, y_true, "r", label="True")
plt.plot(X, y_pred, "b-.", label="Predicted")
plt.legend()
plt.xlabel("X")
plt.ylabel("y")
plt.title("True vs. Predicted")
plt.show()
Qb)¶
This drawing below illustrates the ANN with its input-, hidden- and output-layer
from IPython.display import Image
# Display the hand-drawn diagram of the trained network (input, hidden, output).
Image(filename="./Figs/ANN_drawing.jpg", width=500, height=500)
Qc)¶
First we extract the weights and biases into individual parameters so we can use them for the formula. Thereafter we use the formula:
activationFunction(weight*x+bias)
to find the output for each individual neuron.
# Extracted weights and biases
import numpy as np

# Extract the weights and biases
# weights[0] is the input->hidden matrix, weights[1] the hidden->output matrix.
w11, w12 = weights[0][0]
w21 = weights[1][0]
w22 = weights[1][1]
b1, b2 = biases[0]   # hidden-layer biases
b3 = biases[1][0]    # output bias

# Define the mathematical formula
def network_formula(x):
    """Closed-form forward pass of the trained two-neuron tanh network."""
    return (w21*np.tanh(w11*x+b1) + w22*np.tanh(w12*x+b2))+b3

# Test the formula with a sample input
x_sample = 1.5
y_sample = network_formula(x_sample)
print(f"For x = {x_sample}, y_math = {y_sample}")

# Cross-check against the MLP's own prediction for the same input
mlp_sample_prediction= mlp.predict([[x_sample]])
print("mlp_sample_prediction:")
print(mlp_sample_prediction)

plt.plot(X, network_formula(X), "r", label="Formula")
plt.legend()
plt.xlabel("X")
plt.ylabel("y")
plt.title("Formula plot")
plt.show()
For x = 1.5, y_math = [4.28461614] mlp_sample_prediction: [4.28461614]
As we can see from the sample we get exactly the same from the formula as from the MLP prediction, and when we plot it it looks just like the MLP plot
Qd)¶
Here we compare the Math formula with the MLP-predicted values as well as the ground truth. We can see that they are almost exactly alike.
plt.plot(X, network_formula(X), "r", label="Formula")
plt.plot(X, y_true, "b-.", label="True")
plt.plot(X, y_pred, "g-.", label="Predicted")
plt.legend()
plt.xlabel("X")
plt.ylabel("y")
plt.title("Formula vs. True vs. Predicted")
plt.show()
Qe)¶
Here we plot the function for just the first neuron, just the second as well as the sum of the two + the bias.
def network_formula_first_half(x):
    """Contribution of the first hidden neuron (weighted tanh)."""
    return (w21*np.tanh(w11*x+b1))

def network_formula_second_half(x):
    """Contribution of the second hidden neuron (weighted tanh)."""
    return (w22*np.tanh(w12*x+b2))

# BUG FIX: the accumulator was named `sum`, which shadowed the builtin
# sum() for the rest of the notebook; renamed to neuron_sum.
neuron_sum = network_formula_first_half(X) + network_formula_second_half(X) + b3

plt.plot(X, network_formula_first_half(X), "r", label="Formula first half")
plt.plot(X, network_formula_second_half(X), "b", label="Formula second half")
plt.plot(X, neuron_sum, "g-.", label="Sum")
plt.legend()
plt.xlabel("X")
plt.ylabel("y")
plt.title("Formula plot")
plt.show()
On the plot we can see that the graph for the individual neurons in the MLP exhibit traits very similar to a monotonic tanh activation function since they both go exclusivlely one direction. However they go in the oposite direction, and therefore it totals to the correct function. This works since the function isn't very complex, but if we had to analyze a more complex system we would need more neurons who would all have traits like these and then it would total to the more complex function.
Qf)¶
Here is the data for a more complex function we need to analyze.
def GenerateSincData():
    """Ground truth for the harder task: a sinc curve on [-3, 3].

    The 'ringing' of sinc needs more neurons to approximate than the
    simple tanh bump. Returns X as (1000, 1), y as (1000,).
    """
    xs = np.linspace(-3, 3, 1000)
    targets = np.sinc(xs)
    return xs.reshape(-1, 1), targets
# Plot the sinc ground truth.
X, y_true = GenerateSincData()
plt.plot(X, y_true, "r-")
plt.xlabel("X")
plt.ylabel("y")
plt.title("ANN, Groundtruth data for Sinc")
Text(0.5, 1.0, 'ANN, Groundtruth data for Sinc')
The data generator is changed to a function, that needs a NN with a higher capacity than the previous simple data. Therfore we increased the number of hidden layers to 5 and the amount of neurons per layer to 10 totalling to 50 hidden neurons compared to the 2 in the previous MLP.
# Higher-capacity network for the sinc curve: 5 hidden layers with 10
# neurons each, i.e. 50 hidden neurons in total.
mlp2 = MLPRegressor(activation = 'tanh', # activation function
                    hidden_layer_sizes = [10,10,10,10,10], # layers and neurons: 5 hidden layers with 10 neurons each
                    alpha = 1e-5, # regularization parameter
                    solver = 'lbfgs', # quasi-Newton solver
                    max_iter=10000,
                    verbose = True)

# Refit the old 2-neuron MLP on the sinc data so both models are compared
# on the same task...
mlp.fit(X, y_true)
y_pred_sinc1 = mlp.predict(X)

# ...and fit the new, larger network.
mlp2.fit(X, y_true)
y_pred_sinc2 = mlp2.predict(X)

plt.plot(X, y_true, "r-", label="True")
plt.plot(X, y_pred_sinc1, "b-.", label="Old MLP")
plt.plot(X, y_pred_sinc2, "g-.", label="New MLP")
plt.xlabel("X")
plt.ylabel("y")
plt.title("Prediction vs Groundtruth for Sinc")
Text(0.5, 1.0, 'Prediction vs Groundtruth for Sinc')
As you can see, the new MLP with 25 times as many hidden neurons (50 vs. 2) is significantly better at prediction, lying almost exactly on top of the ground truth. This is to be expected for a more complex curve.
SWMAL Exercise¶
Convolutional Neural Networks (CNNs)¶
- write an introduction to CNNs (what are CNNs, what is a convolution layer, etc..),
- document your experiments towards the end-goal of reaching 'a high accuracy' (what did you try, what work/did not work),
- document how you use 'generalization' in your setup (us of simple hold-out/train-test split or k-fold, or etc..),
- produce some sort of 'learning-curve' that illustrates the drop in cost- or increase in score-function with respect to, say training iteration (for inspiration see fig 4.20, 10-12 or 10.17 in [HOML]),
- document the final CNN setup (layers etc., perhaps as a graph/drawing),
- discus on your iterations towards the end-goal and other findings you had,
- and, as always, write a conclusion.
If you use a code template from slides, HOML or the internet, then remember to add a reference to the original work in you journal. Excercise 9 from [HOML], p.496 2nd./p.535 3rd. (slighty modified):
CNNs, short for Convolutional Neural Network, is a specialized type of neural network model, that is used to work with two-dimensional image data. It is used for processing grid-like data, such as images or video classification, in general series data and pattern recognition.
CNNs consist of different layers, that make up the functionality of the model. One of the key components is the convolutional layer that gives the network its name. Another important term is the filter, that performs the pattern recognition in the network
Convolutional layers are layers that apply learnable filters (kernels) to small regions of the input data. This is a linear operation that takes a uses one or more filters, that are matrixes with different biases. When applying this filter it will multiply the pixels in the input image with the bias of the filter and then find the sum of the region of the image that the filter is applied to. This will generate a new image/matix, where every pixel will represent the sum of the region applied.
This makes the network automatically learn the features from the data. The layer helps capture patterns in visual content.
Pooling Layer reduces the dimensionality, which can be reduced by the amounts of pixel. This can be done by down sampling.
The pooling layer has different approaches, such as, max pooling, average pooling and etc. Max pooling takes the maximum value from a group of neighbouring pixels to the input feature map, while average pooling takes the average of all values in the local region. The Idea is that we want to extract the most activated pixels from the first convolution and shrink the dimensions
Fully Connected Layers is after several convolution and pooling layers. There can be more than one of these layers and they act as a traditional neural network. Meaning they connect every neuron from the previous layer to every neuron in the next layer.
Activation Functions is the non-linearity in the model. This can be ReLu, sigmoid or many more. It helps the model to learn complex relationships between features.
Multiple channels are often used when processing images, allowing one channel per colour component. The convolutional filters are applied independently to each channel.
Generalization
We are using stratified k-fold cross-validation. K-fold involves splitting the training set into k folds, then training the model k times, holding out a different fold each time for evaluation. Stratified means maintaining the same proportions of classes as the original dataset when creating subsets or folds for training and validation. So, the stratified k-fold performs stratified sampling to produce folds that contain a representative ratio of each class. With each iteration, the code creates a clone of the classifier. Using the clone, we train on the training folds and make predictions on the test fold. We then count the number of correct predictions and output the ratio of correct prediction
Learning Curve
For the learning curves, we plot two graphs: one for training loss over iterations and one for training accuracy over iterations. The x-axis is the number of epochs in both plots, while the y-axis is either the loss or the accuracy. Training loss is a measure of how well the machine learning model is performing on the training data. We can observe that throughout epochs, the training loss minimizes, and the predicted values become closer to the actual values.
Training accuracy is the accuracy over each epoch. As we can observe, throughout the epochs, it becomes closer to 1. Of course, it does not rise for every epoch, but the overall curve does.
Code setup We are using a CNN and the dataset is from mnist.
- We load the libary we are using
- We create train and test data from mnist
- We Preprocess the data with reshape and as type and convert y into one-hot encoded formate, so meaning each class is represented as a binary vector.
- Create the CNN model (Explaining with the piceture)
- Set up Stratified K-Fold cross-validation and lists to store training metrics at each iteration
- Loop over folds and use the right data fold
- Loop over epochs where we store each iteration for this fold
- After we put the store epochs data into our metrics for this fold
- Then we evaluate this fold
- All folds is trained and evaluate
- Calculate average test accuracy across folds
- Convert lists to NumPy arrays for easier plotting
- Plot Training Loss
- Plot Accuracy Loss
CNN setup
The CNN setup is visualised in following diagram:
Total params: 257,162 Trainable params: 257,162 Non-trainable params: 0
Experiment
In order to achieve the best accuracy outcome, we would do different tests, altering the amounts of folds and epochs. From the first test the average accuracy was already very good: Test average accuracy: 0.9915000200271606.
We generally test on the folds, to get a more robust estimate, since the model gets a more reliable predictioin when practicing with different sets. We also alternated the amount of pooling layers, to see if reducing the spatial dimensions would better the efficiency, while still maintaining accurate predictions.
Test with 3 folds and 5 epochs:¶
Test with 6 folds and 17 epochs:¶
Test with 6 folds and 17 epochs. Here is also removed a layer of pooling:¶
Test with 3 folds and 17 epochs. Here is also added a extra layer of pooling:¶
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), padding="same", activation='relu'))
From these test, we can conclude that it is the test with 6 folds and 17 epochs, which gave the best average accuracy score, with a score of 0.991599977016449
Iterations towards the end-goal
Through the tests we reached an end implementation with the optimal parameters, 6 folds and 17 epochs
from keras.datasets.mnist import load_data
from keras.utils import plot_model, to_categorical
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
# Load the MNIST train/test split.
(X_train, y_train), (X_test, y_test) = load_data()

# Scale pixel values into [0, 1] and add an explicit channel axis so each
# image becomes a 28x28x1 float tensor, as expected by Conv2D.
X_train = (X_train.astype('float32') / 255.0).reshape((-1, 28, 28, 1))
X_test = (X_test.astype('float32') / 255.0).reshape((-1, 28, 28, 1))

# One-hot encode the labels: each digit becomes a 10-element binary vector.
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
def createModel():
    """Build and compile a small CNN for 10-class MNIST classification.

    Architecture: Conv(32) -> MaxPool -> Conv(64) -> Flatten -> Dense(64)
    -> Dense(10, softmax). Returns the compiled Keras model.
    """
    # (3, 3) is the convolution kernel size in pixels; (2, 2) the pooling window.
    layers = [
        Conv2D(32, (3, 3), padding="same", activation='relu', input_shape=(28, 28, 1)),
        MaxPooling2D((2, 2)),
        Conv2D(64, (3, 3), padding="same", activation='relu'),
        Flatten(),
        Dense(64, activation='relu'),
        Dense(10, activation='softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    # Save a diagram of the architecture (requires pydot + graphviz;
    # Keras only warns if they are missing).
    plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    return model
folds = 6
kfold = StratifiedKFold(n_splits=folds, shuffle=True, random_state=42)

# Per-fold metrics: the test accuracy of each fold's model, plus the
# per-epoch training loss/accuracy curves for each fold.
test_accuracies = []
train_losses = []
train_accuracies = []

# StratifiedKFold needs integer class labels, hence argmax over the one-hot rows.
for train, val in kfold.split(X_train, y_train.argmax(1)):
    # Fresh model per fold so weights do not leak between folds.
    model = createModel()
    X_train_kfold, x_val_kfold = X_train[train], X_train[val]
    y_train_kfold, y_val_kfold = y_train[train], y_train[val]
    fold_train_losses = []
    fold_train_accuracies = []
    # Train one epoch at a time so we can record the metrics after every epoch.
    for epoch in range(17):
        history = model.fit(X_train_kfold, y_train_kfold, epochs=1, batch_size=64, verbose=0)
        fold_train_losses.append(history.history['loss'][0])
        fold_train_accuracies.append(history.history['accuracy'][0])
    train_losses.append(fold_train_losses)
    train_accuracies.append(fold_train_accuracies)
    # Evaluate this fold's model on the held-out test set.
    # BUG FIX: previously the accumulator list `test_accuracy` was overwritten
    # by the scalar returned from model.evaluate(), so the reported "average"
    # was only the LAST fold's accuracy. We now append each fold's accuracy.
    test_loss, fold_test_accuracy = model.evaluate(X_test, y_test)
    test_accuracies.append(fold_test_accuracy)

# Average test accuracy across all folds.
avg_accuracy = np.mean(test_accuracies)
print(f'Test average accuracy: {avg_accuracy}')
# Stack the per-fold curves into 2-D arrays (folds x epochs) so we can
# average across folds when plotting.
train_losses = np.array(train_losses)
train_accuracies = np.array(train_accuracies)

# Two side-by-side panels: mean training loss and mean training accuracy
# per epoch, averaged over all folds.
panels = [
    (1, train_losses, 'Training Loss', 'Training Loss Over Iterations', 'Loss'),
    (2, train_accuracies, 'Training Accuracy', 'Training Accuracy Over Iterations', 'Accuracy'),
]
for position, curves, label, title, ylabel in panels:
    plt.subplot(1, 2, position)
    plt.plot(np.mean(curves, axis=0), label=label)
    plt.title(title)
    plt.xlabel('Epoch')
    plt.ylabel(ylabel)
    plt.legend()
plt.tight_layout()
plt.show()
You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work. 313/313 [==============================] - 1s 2ms/step - loss: 0.0526 - accuracy: 0.9895 You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work. 313/313 [==============================] - 1s 2ms/step - loss: 0.0551 - accuracy: 0.9879 You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work. 313/313 [==============================] - 1s 3ms/step - loss: 0.0478 - accuracy: 0.9897 You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work. 313/313 [==============================] - 1s 2ms/step - loss: 0.0760 - accuracy: 0.9885 You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work. 313/313 [==============================] - 1s 2ms/step - loss: 0.0528 - accuracy: 0.9896 You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work. 313/313 [==============================] - 1s 2ms/step - loss: 0.0563 - accuracy: 0.9891 You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work. 313/313 [==============================] - 1s 3ms/step - loss: 0.0606 - accuracy: 0.9897 You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work. 313/313 [==============================] - 1s 2ms/step - loss: 0.0567 - accuracy: 0.9879 Test average accuracy: 0.9879000186920166
Conclusion
In this exercise, we worked with CNNs, and specialized a model designed to process data in the form of images. Through our experiments we could attain a high accuracy on the MNIST dataset using a CNN, by exploring various configurations. The adjustment of folds and pooling layers had an effect on the average accuracy score, and with the average accuracy score and the learning curves, we could determine which approach would be best. Our final result gave a score of 0.9879000186920166, and we can observe a decrease in the training loss and an increase in accuracy.
SWMAL Exercise¶
Generalization Error¶
Description
In this exercise, we need to explain all important overall concepts in training. Let's begin with Figure 5.3 from Deep Learning (Ian Goodfellow, et. al. [DL]), that pretty much sums it all up
Qa) On Generalization Error¶
Description
Write a detailed description of figure 5.3 (above) for your hand-in. All concepts in the figure must be explained:
training/generalization error,
underfit/overfit zone,
optimal capacity,
generalization gap,
and the two axes: x/capacity, y/error.
We gave Chat-gpt a link to the graph and the Qa description to see if it could solve the assignment, however we have also explained the terms in our own words underneath. This is the result:
ChatGTP answer
1. Training and Generalization Error: Training Error (Blue Curve): This curve represents the error (loss) on the training dataset as the capacity (complexity) of the model increases. Initially, the error decreases as the model becomes more complex, indicating better fit to the training data.Generalization Error (Red Curve): This curve represents the error (loss) on an unseen test/validation dataset. It measures how well the model performs on new, unseen data that it wasn't trained on.
- Underfit and Overfit Zone: Underfitting (Left Side): When the model's capacity is too low (left side of the graph), it struggles to capture the underlying patterns in the data. Both the training and generalization errors are high. This is a sign of insufficient complexity.
Overfitting (Right Side): As the model's capacity increases, it starts to fit not only the underlying patterns but also the noise in the training data. This leads to a significant drop in training error but an increase in generalization error. The model is now fitting the noise, which is detrimental for unseen data.
- Optimal Capacity: Optimal Capacity (Middle Area): The sweet spot in the middle represents the optimal capacity of the model. It's the point where the generalization error is minimized. The model has enough complexity to capture the underlying patterns without overfitting to the noise.
- Generalization Gap: Generalization Gap (Vertical Distance): It's the vertical distance between the training error curve and the generalization error curve. It measures the difference in performance between the training and test datasets. A large gap indicates overfitting, while a small gap indicates good generalization.
- Axes: x/Capacity, y/Error: x-axis (Capacity): This represents the complexity or capacity of the model. It's a measure of how flexible the model is in capturing intricate patterns in the data.
y-axis (Error): This axis shows the error (loss) associated with the model's predictions. Lower values indicate better performance, while higher values indicate poorer performance.
In summary, Figure 5.3 illustrates the trade-off between the complexity of a model (capacity) and its ability to generalize to new, unseen data. The goal is to find the optimal capacity that minimizes the generalization error, striking a balance between underfitting and overfitting.
On the graph a the y-axis describes the error amount, aka how often the model makes a wrong or partially incorrect prediction. The x-axis describes the capacity of the model, aka how capable or complex the model is. This can be increased by for example adding more neurons per layer, adding more total layers or switching to a more complex model.
Training error: The blue curve is the training error. The training error curve describes the error percentage in relation to the capacity/complexity of the model when used on the training data. Naturally, when we increase the power of our model, the model should be able to understand more patterns in the data. This is however not necessarily a good thing, which will be elaborated in the overfitting segment.
Generalization error The green curve is the Generalization error. The generalization error describes the error percentage in relation to the capacity/complexity of the training model when used on unseen test data.
Underfitting: Underfitting happens when a model is unable to capture enough of the patterns, if any, in the training and test data. This results in a higher training and generalization error. If your model has a high generalization and training error, it could be a sign that you have underfitted your model and need to increase the complexity of the model. This can be done in many different ways.
Overfitting: Overfitting is when a too capable model is being used on a data set. This results in the model finding patterns in the "noise" of the training data that potentially aren't relevant and therefore result in a higher generalization error. The training error is typically very low since the very capable model has found all the patterns and more in the training data. If you experience a high generalization error and a very low training error, it could be an indication that you need to reduce the capacity of your model, as the model has overcomplicated its understanding of the data. It could also be because there are patterns in the training data which are not present in the test data, so it would be a good idea to also check that you split the data correctly and shuffle it.
Generalization gap The generalization gap, is the vertical gap between the training error curve and the generalization curve. This means it describes the size of the difference between the errors that occur when the model is used on the training data and when the model is used on the test data. It typically increases when the model has been overfitted.
Optimal Capacity The optimal capacity is the perfect complexity for the model where it filters out the noise-patterns and picks up as many of the actual patterns as possible. This results in the training error generalization error and generalization gap being as low as possible. It is what you want to get as close to as possible when fitting you model.
Qb A MSE-Epoch/Error Plot¶
Description
Next, we look at a SGD model for fitting polynomial, that is _polynomial regression_ similar to what GƩron describes in [HOML] ("Polynomial Regression" + "Learning Curves").Review the code below for plotting the RMSE vs. the iteration number or epoch below (three cells, part I/II/III).
Write a short description of the code, and comment on the important points in the generation of the (R)MSE array.
The training phase output lots of lines like
epoch= 104, mse_train=1.50, mse_val=2.37
epoch= 105, mse_train=1.49, mse_val=2.35
What is an epoch and what is mse_train and mse_val?
NOTE$_1$: the generalization plot figure 5.3 in [DL] (above) and the plots below have different x-axis, and are not to be compared directly!
NOTE$_2$: notice that a degree-90 polynomial is used for the polynomial regression. This is just to produce a model with an extremely high capacity.
Part 1:
The GenerateData() is used to generate random data points for X and add some noise to the variable y to simulate real-world data. The y is a quadratic function, i.e. the mathematical function $f(x) = ax^2 + bx + c$. Afterwards the data is split into a training set and a validation set.
Afterwards we create a pipeline which combines PolynomialFeatures and StandardScaler. PolynomialFeatures is a preprocessor which takes two inputs: the degree and the bias flag. We generate degree-90 polynomial features and don't include the bias. This is just to produce a model with an extremely high capacity. StandardScaler is also a preprocessor, which standardizes the features in the dataset. This is done by removing the mean, centering the feature distribution around zero, and then scaling the feature variance, so that the variance of each feature will be 1.
The we train and validation set are transformed with X_train_poly_scaled = poly_scaler.fit_transform(X_train) and X_val_poly_scaled = poly_scaler.transform(X_val)
Part 2:
Creates a function called Train. It takes the inputs def Train(X_train, y_train, X_val, y_val, n_epochs, verbose=False). It makes an array of errors for both the validation and training sets. Then we create an SGDRegressor with only one iteration per fit call, with a constant learning rate.
Thereafter we have a for loop running through each epoch, where we fit and predict for both train and validation. Then we add the MSE for training and validation to the respective error arrays. If verbose is set to true we print out each epoch and the MSE for training and validation.
epoch= 104, mse_train=1.50, mse_val=2.37
The epoch is the number of the training iteration. In each epoch we train and validate the model and append the results to an array. The mse_train is the mean squared error on the training set and it measures how well the model performs in this epoch. The same applies to mse_val, but on the validation set. The output is used to help us monitor the training process and assess how well the model is learning from the data.
Part 3:
We find the best epoch using the minimum of our validation errors array. After that we calculate the RMSE for the best-performing epoch. Then we mark this epoch with an arrow. The plot has the epoch on the x-axis and RMSE on the y-axis. A horizontal dotted line indicates the RMSE of the best model. The best model is the one with the lowest validation RMSE. This model will follow the underlying trend in the data without fitting too closely to the noise and is the best at generalizing to unseen data.
# Run code: Qb(part I)
# NOTE: modified code from [GITHOML], 04_training_linear_models.ipynb
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import PolynomialFeatures, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.linear_model import SGDRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
np.random.seed(42)
def GenerateData():
    """Return (X, y): 100 noisy samples of the quadratic y = 2 + x + 0.5*x^2.

    X is drawn uniformly from [-3, 3); standard Gaussian noise is added to y.
    Both returned arrays have shape (100, 1).
    """
    n_samples = 100
    X = 6 * np.random.rand(n_samples, 1) - 3
    noise = np.random.randn(n_samples, 1)
    y = 2 + X + 0.5 * X ** 2 + noise
    return X, y
X, y = GenerateData()
X_train, X_val, y_train, y_val = \
train_test_split( \
X[:50], y[:50].ravel(), \
test_size=0.5, \
random_state=10)
print("X_train.shape=",X_train.shape)
print("X_val .shape=",X_val.shape)
print("y_train.shape=",y_train.shape)
print("y_val .shape=",y_val.shape)
poly_scaler = Pipeline([
("poly_features", PolynomialFeatures(degree=90, include_bias=False)),
("std_scaler", StandardScaler()),
])
X_train_poly_scaled = poly_scaler.fit_transform(X_train)
X_val_poly_scaled = poly_scaler.transform(X_val)
X_new=np.linspace(-3, 3, 100).reshape(100, 1)
plt.plot(X, y, "b.", label="All X-y Data")
plt.xlabel("$x_1$", fontsize=18, )
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.axis([-3, 3, 0, 10])
plt.show()
print('OK')
X_train.shape= (25, 1) X_val .shape= (25, 1) y_train.shape= (25,) y_val .shape= (25,)
OK
# Run code: Qb(part II)
def Train(X_train, y_train, X_val, y_val, n_epochs, verbose=False):
    """Fit an SGD regressor one epoch at a time, recording train/val MSE.

    Returns (train_errors, val_errors): lists holding the MSE on the
    training and validation sets after each of the n_epochs passes.
    """
    print("Training...n_epochs=",n_epochs)
    mse_history_train = []
    mse_history_val = []
    # max_iter=1 + warm_start=True means each .fit() call performs exactly
    # one additional pass, continuing from the previous weights; the
    # constant learning rate keeps every epoch's step size identical.
    sgd_reg = SGDRegressor(max_iter=1,
                           penalty=None,
                           eta0=0.0005,
                           warm_start=True,
                           early_stopping=False,
                           learning_rate="constant",
                           tol=-float(0),
                           random_state=42)
    for epoch in range(n_epochs):
        sgd_reg.fit(X_train, y_train)
        mse_train = mean_squared_error(y_train, sgd_reg.predict(X_train))
        mse_val = mean_squared_error(y_val, sgd_reg.predict(X_val))
        mse_history_train.append(mse_train)
        mse_history_val.append(mse_val)
        if verbose:
            print(f" epoch={epoch:4d}, mse_train={mse_train:4.2f}, mse_val={mse_val:4.2f}")
    return mse_history_train, mse_history_val
# Run 500 epochs on the scaled polynomial features with verbose per-epoch output.
n_epochs = 500
train_errors, val_errors = Train(X_train_poly_scaled, y_train, X_val_poly_scaled, y_val, n_epochs, True)
print('OK')
Training...n_epochs= 500 epoch= 0, mse_train=11.85, mse_val=14.58 epoch= 1, mse_train=11.51, mse_val=14.10 epoch= 2, mse_train=11.15, mse_val=13.60 epoch= 3, mse_train=10.81, mse_val=13.13 epoch= 4, mse_train=10.49, mse_val=12.70 epoch= 5, mse_train=10.18, mse_val=12.30 epoch= 6, mse_train=9.88, mse_val=11.92 epoch= 7, mse_train=9.60, mse_val=11.56 epoch= 8, mse_train=9.33, mse_val=11.23 epoch= 9, mse_train=9.07, mse_val=10.91 epoch= 10, mse_train=8.82, mse_val=10.62 epoch= 11, mse_train=8.59, mse_val=10.34 epoch= 12, mse_train=8.36, mse_val=10.07 epoch= 13, mse_train=8.14, mse_val=9.82 epoch= 14, mse_train=7.93, mse_val=9.57 epoch= 15, mse_train=7.72, mse_val=9.34 epoch= 16, mse_train=7.53, mse_val=9.12 epoch= 17, mse_train=7.34, mse_val=8.91 epoch= 18, mse_train=7.16, mse_val=8.71 epoch= 19, mse_train=6.98, mse_val=8.52 epoch= 20, mse_train=6.81, mse_val=8.33 epoch= 21, mse_train=6.65, mse_val=8.15 epoch= 22, mse_train=6.49, mse_val=7.98 epoch= 23, mse_train=6.34, mse_val=7.81 epoch= 24, mse_train=6.19, mse_val=7.65 epoch= 25, mse_train=6.05, mse_val=7.49 epoch= 26, mse_train=5.91, mse_val=7.34 epoch= 27, mse_train=5.77, mse_val=7.20 epoch= 28, mse_train=5.64, mse_val=7.06 epoch= 29, mse_train=5.52, mse_val=6.92 epoch= 30, mse_train=5.40, mse_val=6.79 epoch= 31, mse_train=5.28, mse_val=6.66 epoch= 32, mse_train=5.16, mse_val=6.54 epoch= 33, mse_train=5.05, mse_val=6.42 epoch= 34, mse_train=4.94, mse_val=6.30 epoch= 35, mse_train=4.84, mse_val=6.18 epoch= 36, mse_train=4.73, mse_val=6.07 epoch= 37, mse_train=4.63, mse_val=5.97 epoch= 38, mse_train=4.54, mse_val=5.86 epoch= 39, mse_train=4.44, mse_val=5.76 epoch= 40, mse_train=4.35, mse_val=5.66 epoch= 41, mse_train=4.26, mse_val=5.56 epoch= 42, mse_train=4.17, mse_val=5.47 epoch= 43, mse_train=4.09, mse_val=5.37 epoch= 44, mse_train=4.01, mse_val=5.28 epoch= 45, mse_train=3.93, mse_val=5.20 epoch= 46, mse_train=3.85, mse_val=5.11 epoch= 47, mse_train=3.77, mse_val=5.03 epoch= 48, mse_train=3.70, mse_val=4.95 
epoch= 49, mse_train=3.63, mse_val=4.87 epoch= 50, mse_train=3.56, mse_val=4.79 epoch= 51, mse_train=3.49, mse_val=4.71 epoch= 52, mse_train=3.42, mse_val=4.64 epoch= 53, mse_train=3.36, mse_val=4.57 epoch= 54, mse_train=3.29, mse_val=4.49 epoch= 55, mse_train=3.23, mse_val=4.43 epoch= 56, mse_train=3.17, mse_val=4.36 epoch= 57, mse_train=3.11, mse_val=4.29 epoch= 58, mse_train=3.06, mse_val=4.23 epoch= 59, mse_train=3.00, mse_val=4.16 epoch= 60, mse_train=2.95, mse_val=4.10 epoch= 61, mse_train=2.89, mse_val=4.04 epoch= 62, mse_train=2.84, mse_val=3.98 epoch= 63, mse_train=2.79, mse_val=3.93 epoch= 64, mse_train=2.74, mse_val=3.87 epoch= 65, mse_train=2.70, mse_val=3.81 epoch= 66, mse_train=2.65, mse_val=3.76 epoch= 67, mse_train=2.60, mse_val=3.71 epoch= 68, mse_train=2.56, mse_val=3.66 epoch= 69, mse_train=2.52, mse_val=3.60 epoch= 70, mse_train=2.47, mse_val=3.56 epoch= 71, mse_train=2.43, mse_val=3.51 epoch= 72, mse_train=2.39, mse_val=3.46 epoch= 73, mse_train=2.35, mse_val=3.41 epoch= 74, mse_train=2.31, mse_val=3.37 epoch= 75, mse_train=2.28, mse_val=3.32 epoch= 76, mse_train=2.24, mse_val=3.28 epoch= 77, mse_train=2.20, mse_val=3.24 epoch= 78, mse_train=2.17, mse_val=3.20 epoch= 79, mse_train=2.14, mse_val=3.15 epoch= 80, mse_train=2.10, mse_val=3.12 epoch= 81, mse_train=2.07, mse_val=3.08 epoch= 82, mse_train=2.04, mse_val=3.04 epoch= 83, mse_train=2.01, mse_val=3.00 epoch= 84, mse_train=1.98, mse_val=2.96 epoch= 85, mse_train=1.95, mse_val=2.93 epoch= 86, mse_train=1.92, mse_val=2.89 epoch= 87, mse_train=1.89, mse_val=2.86 epoch= 88, mse_train=1.86, mse_val=2.82 epoch= 89, mse_train=1.84, mse_val=2.79 epoch= 90, mse_train=1.81, mse_val=2.76 epoch= 91, mse_train=1.79, mse_val=2.73 epoch= 92, mse_train=1.76, mse_val=2.70 epoch= 93, mse_train=1.74, mse_val=2.67 epoch= 94, mse_train=1.71, mse_val=2.64 epoch= 95, mse_train=1.69, mse_val=2.61 epoch= 96, mse_train=1.67, mse_val=2.58 epoch= 97, mse_train=1.65, mse_val=2.55 epoch= 98, mse_train=1.62, mse_val=2.52 
epoch= 99, mse_train=1.60, mse_val=2.50 epoch= 100, mse_train=1.58, mse_val=2.47 epoch= 101, mse_train=1.56, mse_val=2.45 epoch= 102, mse_train=1.54, mse_val=2.42 epoch= 103, mse_train=1.52, mse_val=2.40 epoch= 104, mse_train=1.50, mse_val=2.37 epoch= 105, mse_train=1.49, mse_val=2.35 epoch= 106, mse_train=1.47, mse_val=2.33 epoch= 107, mse_train=1.45, mse_val=2.30 epoch= 108, mse_train=1.43, mse_val=2.28 epoch= 109, mse_train=1.42, mse_val=2.26 epoch= 110, mse_train=1.40, mse_val=2.24 epoch= 111, mse_train=1.38, mse_val=2.22 epoch= 112, mse_train=1.37, mse_val=2.20 epoch= 113, mse_train=1.35, mse_val=2.18 epoch= 114, mse_train=1.34, mse_val=2.16 epoch= 115, mse_train=1.32, mse_val=2.14 epoch= 116, mse_train=1.31, mse_val=2.12 epoch= 117, mse_train=1.30, mse_val=2.10 epoch= 118, mse_train=1.28, mse_val=2.08 epoch= 119, mse_train=1.27, mse_val=2.06 epoch= 120, mse_train=1.26, mse_val=2.05 epoch= 121, mse_train=1.24, mse_val=2.03 epoch= 122, mse_train=1.23, mse_val=2.01 epoch= 123, mse_train=1.22, mse_val=2.00 epoch= 124, mse_train=1.21, mse_val=1.98 epoch= 125, mse_train=1.19, mse_val=1.97 epoch= 126, mse_train=1.18, mse_val=1.95
c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn(
epoch= 127, mse_train=1.17, mse_val=1.94 epoch= 128, mse_train=1.16, mse_val=1.92 epoch= 129, mse_train=1.15, mse_val=1.91 epoch= 130, mse_train=1.14, mse_val=1.89 epoch= 131, mse_train=1.13, mse_val=1.88 epoch= 132, mse_train=1.12, mse_val=1.87 epoch= 133, mse_train=1.11, mse_val=1.85 epoch= 134, mse_train=1.10, mse_val=1.84 epoch= 135, mse_train=1.09, mse_val=1.83 epoch= 136, mse_train=1.08, mse_val=1.81 epoch= 137, mse_train=1.07, mse_val=1.80 epoch= 138, mse_train=1.06, mse_val=1.79 epoch= 139, mse_train=1.06, mse_val=1.78 epoch= 140, mse_train=1.05, mse_val=1.77 epoch= 141, mse_train=1.04, mse_val=1.76 epoch= 142, mse_train=1.03, mse_val=1.74 epoch= 143, mse_train=1.02, mse_val=1.73 epoch= 144, mse_train=1.02, mse_val=1.72 epoch= 145, mse_train=1.01, mse_val=1.71 epoch= 146, mse_train=1.00, mse_val=1.70 epoch= 147, mse_train=0.99, mse_val=1.69 epoch= 148, mse_train=0.99, mse_val=1.68 epoch= 149, mse_train=0.98, mse_val=1.67 epoch= 150, mse_train=0.97, mse_val=1.67 epoch= 151, mse_train=0.97, mse_val=1.66 epoch= 152, mse_train=0.96, mse_val=1.65 epoch= 153, mse_train=0.95, mse_val=1.64 epoch= 154, mse_train=0.95, mse_val=1.63 epoch= 155, mse_train=0.94, mse_val=1.62 epoch= 156, mse_train=0.93, mse_val=1.61 epoch= 157, mse_train=0.93, mse_val=1.61 epoch= 158, mse_train=0.92, mse_val=1.60 epoch= 159, mse_train=0.92, mse_val=1.59 epoch= 160, mse_train=0.91, mse_val=1.58 epoch= 161, mse_train=0.91, mse_val=1.58 epoch= 162, mse_train=0.90, mse_val=1.57 epoch= 163, mse_train=0.90, mse_val=1.56 epoch= 164, mse_train=0.89, mse_val=1.56 epoch= 165, mse_train=0.89, mse_val=1.55 epoch= 166, mse_train=0.88, mse_val=1.54 epoch= 167, mse_train=0.88, mse_val=1.54 epoch= 168, mse_train=0.87, mse_val=1.53 epoch= 169, mse_train=0.87, mse_val=1.52 epoch= 170, mse_train=0.86, mse_val=1.52 epoch= 171, mse_train=0.86, mse_val=1.51 epoch= 172, mse_train=0.85, mse_val=1.51 epoch= 173, mse_train=0.85, mse_val=1.50 epoch= 174, mse_train=0.84, mse_val=1.50 epoch= 175, mse_train=0.84, 
mse_val=1.49 epoch= 176, mse_train=0.84, mse_val=1.49 epoch= 177, mse_train=0.83, mse_val=1.48 epoch= 178, mse_train=0.83, mse_val=1.48 epoch= 179, mse_train=0.82, mse_val=1.47 epoch= 180, mse_train=0.82, mse_val=1.47 epoch= 181, mse_train=0.82, mse_val=1.46 epoch= 182, mse_train=0.81, mse_val=1.46 epoch= 183, mse_train=0.81, mse_val=1.45 epoch= 184, mse_train=0.81, mse_val=1.45 epoch= 185, mse_train=0.80, mse_val=1.45 epoch= 186, mse_train=0.80, mse_val=1.44 epoch= 187, mse_train=0.80, mse_val=1.44 epoch= 188, mse_train=0.79, mse_val=1.43 epoch= 189, mse_train=0.79, mse_val=1.43 epoch= 190, mse_train=0.79, mse_val=1.43 epoch= 191, mse_train=0.78, mse_val=1.42 epoch= 192, mse_train=0.78, mse_val=1.42 epoch= 193, mse_train=0.78, mse_val=1.42 epoch= 194, mse_train=0.77, mse_val=1.41 epoch= 195, mse_train=0.77, mse_val=1.41 epoch= 196, mse_train=0.77, mse_val=1.41 epoch= 197, mse_train=0.77, mse_val=1.40 epoch= 198, mse_train=0.76, mse_val=1.40 epoch= 199, mse_train=0.76, mse_val=1.40 epoch= 200, mse_train=0.76, mse_val=1.40 epoch= 201, mse_train=0.75, mse_val=1.39 epoch= 202, mse_train=0.75, mse_val=1.39 epoch= 203, mse_train=0.75, mse_val=1.39 epoch= 204, mse_train=0.75, mse_val=1.39 epoch= 205, mse_train=0.74, mse_val=1.39 epoch= 206, mse_train=0.74, mse_val=1.38 epoch= 207, mse_train=0.74, mse_val=1.38 epoch= 208, mse_train=0.74, mse_val=1.38 epoch= 209, mse_train=0.73, mse_val=1.38 epoch= 210, mse_train=0.73, mse_val=1.38 epoch= 211, mse_train=0.73, mse_val=1.37 epoch= 212, mse_train=0.73, mse_val=1.37 epoch= 213, mse_train=0.73, mse_val=1.37 epoch= 214, mse_train=0.72, mse_val=1.37 epoch= 215, mse_train=0.72, mse_val=1.37 epoch= 216, mse_train=0.72, mse_val=1.37 epoch= 217, mse_train=0.72, mse_val=1.36 epoch= 218, mse_train=0.72, mse_val=1.36 epoch= 219, mse_train=0.71, mse_val=1.36 epoch= 220, mse_train=0.71, mse_val=1.36 epoch= 221, mse_train=0.71, mse_val=1.36 epoch= 222, mse_train=0.71, mse_val=1.36 epoch= 223, mse_train=0.71, mse_val=1.36 epoch= 224, 
mse_train=0.70, mse_val=1.36 epoch= 225, mse_train=0.70, mse_val=1.36 epoch= 226, mse_train=0.70, mse_val=1.36 epoch= 227, mse_train=0.70, mse_val=1.36 epoch= 228, mse_train=0.70, mse_val=1.35 epoch= 229, mse_train=0.70, mse_val=1.35 epoch= 230, mse_train=0.69, mse_val=1.35 epoch= 231, mse_train=0.69, mse_val=1.35 epoch= 232, mse_train=0.69, mse_val=1.35 epoch= 233, mse_train=0.69, mse_val=1.35 epoch= 234, mse_train=0.69, mse_val=1.35 epoch= 235, mse_train=0.69, mse_val=1.35
c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn(
epoch= 236, mse_train=0.68, mse_val=1.35 epoch= 237, mse_train=0.68, mse_val=1.35 epoch= 238, mse_train=0.68, mse_val=1.35 epoch= 239, mse_train=0.68, mse_val=1.35 epoch= 240, mse_train=0.68, mse_val=1.35 epoch= 241, mse_train=0.68, mse_val=1.35 epoch= 242, mse_train=0.67, mse_val=1.35 epoch= 243, mse_train=0.67, mse_val=1.35 epoch= 244, mse_train=0.67, mse_val=1.35 epoch= 245, mse_train=0.67, mse_val=1.35 epoch= 246, mse_train=0.67, mse_val=1.35 epoch= 247, mse_train=0.67, mse_val=1.35 epoch= 248, mse_train=0.67, mse_val=1.35 epoch= 249, mse_train=0.67, mse_val=1.35 epoch= 250, mse_train=0.66, mse_val=1.35 epoch= 251, mse_train=0.66, mse_val=1.35 epoch= 252, mse_train=0.66, mse_val=1.35 epoch= 253, mse_train=0.66, mse_val=1.36 epoch= 254, mse_train=0.66, mse_val=1.36 epoch= 255, mse_train=0.66, mse_val=1.36 epoch= 256, mse_train=0.66, mse_val=1.36 epoch= 257, mse_train=0.66, mse_val=1.36 epoch= 258, mse_train=0.65, mse_val=1.36 epoch= 259, mse_train=0.65, mse_val=1.36 epoch= 260, mse_train=0.65, mse_val=1.36 epoch= 261, mse_train=0.65, mse_val=1.36 epoch= 262, mse_train=0.65, mse_val=1.36 epoch= 263, mse_train=0.65, mse_val=1.36 epoch= 264, mse_train=0.65, mse_val=1.36 epoch= 265, mse_train=0.65, mse_val=1.37 epoch= 266, mse_train=0.65, mse_val=1.37 epoch= 267, mse_train=0.64, mse_val=1.37 epoch= 268, mse_train=0.64, mse_val=1.37 epoch= 269, mse_train=0.64, mse_val=1.37 epoch= 270, mse_train=0.64, mse_val=1.37 epoch= 271, mse_train=0.64, mse_val=1.37 epoch= 272, mse_train=0.64, mse_val=1.37 epoch= 273, mse_train=0.64, mse_val=1.37 epoch= 274, mse_train=0.64, mse_val=1.38 epoch= 275, mse_train=0.64, mse_val=1.38 epoch= 276, mse_train=0.64, mse_val=1.38 epoch= 277, mse_train=0.63, mse_val=1.38 epoch= 278, mse_train=0.63, mse_val=1.38 epoch= 279, mse_train=0.63, mse_val=1.38 epoch= 280, mse_train=0.63, mse_val=1.38 epoch= 281, mse_train=0.63, mse_val=1.39 epoch= 282, mse_train=0.63, mse_val=1.39 epoch= 283, mse_train=0.63, mse_val=1.39 epoch= 284, mse_train=0.63, 
mse_val=1.39 epoch= 285, mse_train=0.63, mse_val=1.39 epoch= 286, mse_train=0.63, mse_val=1.39 epoch= 287, mse_train=0.63, mse_val=1.40 epoch= 288, mse_train=0.62, mse_val=1.40 epoch= 289, mse_train=0.62, mse_val=1.40 epoch= 290, mse_train=0.62, mse_val=1.40 epoch= 291, mse_train=0.62, mse_val=1.40 epoch= 292, mse_train=0.62, mse_val=1.40 epoch= 293, mse_train=0.62, mse_val=1.41 epoch= 294, mse_train=0.62, mse_val=1.41 epoch= 295, mse_train=0.62, mse_val=1.41 epoch= 296, mse_train=0.62, mse_val=1.41 epoch= 297, mse_train=0.62, mse_val=1.41 epoch= 298, mse_train=0.62, mse_val=1.42 epoch= 299, mse_train=0.62, mse_val=1.42 epoch= 300, mse_train=0.61, mse_val=1.42 epoch= 301, mse_train=0.61, mse_val=1.42 epoch= 302, mse_train=0.61, mse_val=1.42 epoch= 303, mse_train=0.61, mse_val=1.43 epoch= 304, mse_train=0.61, mse_val=1.43 epoch= 305, mse_train=0.61, mse_val=1.43 epoch= 306, mse_train=0.61, mse_val=1.43 epoch= 307, mse_train=0.61, mse_val=1.43 epoch= 308, mse_train=0.61, mse_val=1.44 epoch= 309, mse_train=0.61, mse_val=1.44 epoch= 310, mse_train=0.61, mse_val=1.44 epoch= 311, mse_train=0.61, mse_val=1.44 epoch= 312, mse_train=0.61, mse_val=1.44 epoch= 313, mse_train=0.61, mse_val=1.45 epoch= 314, mse_train=0.60, mse_val=1.45 epoch= 315, mse_train=0.60, mse_val=1.45 epoch= 316, mse_train=0.60, mse_val=1.45 epoch= 317, mse_train=0.60, mse_val=1.46 epoch= 318, mse_train=0.60, mse_val=1.46 epoch= 319, mse_train=0.60, mse_val=1.46 epoch= 320, mse_train=0.60, mse_val=1.46 epoch= 321, mse_train=0.60, mse_val=1.47 epoch= 322, mse_train=0.60, mse_val=1.47 epoch= 323, mse_train=0.60, mse_val=1.47 epoch= 324, mse_train=0.60, mse_val=1.47 epoch= 325, mse_train=0.60, mse_val=1.48 epoch= 326, mse_train=0.60, mse_val=1.48 epoch= 327, mse_train=0.60, mse_val=1.48 epoch= 328, mse_train=0.60, mse_val=1.48 epoch= 329, mse_train=0.60, mse_val=1.49 epoch= 330, mse_train=0.59, mse_val=1.49 epoch= 331, mse_train=0.59, mse_val=1.49 epoch= 332, mse_train=0.59, mse_val=1.49 epoch= 333, 
mse_train=0.59, mse_val=1.50 epoch= 334, mse_train=0.59, mse_val=1.50 epoch= 335, mse_train=0.59, mse_val=1.50 epoch= 336, mse_train=0.59, mse_val=1.50 epoch= 337, mse_train=0.59, mse_val=1.51 epoch= 338, mse_train=0.59, mse_val=1.51 epoch= 339, mse_train=0.59, mse_val=1.51 epoch= 340, mse_train=0.59, mse_val=1.51 epoch= 341, mse_train=0.59, mse_val=1.52 epoch= 342, mse_train=0.59, mse_val=1.52 epoch= 343, mse_train=0.59, mse_val=1.52 epoch= 344, mse_train=0.59, mse_val=1.52 epoch= 345, mse_train=0.59, mse_val=1.53 epoch= 346, mse_train=0.59, mse_val=1.53 epoch= 347, mse_train=0.59, mse_val=1.53 epoch= 348, mse_train=0.58, mse_val=1.53 epoch= 349, mse_train=0.58, mse_val=1.54
c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn(
epoch= 350, mse_train=0.58, mse_val=1.54 epoch= 351, mse_train=0.58, mse_val=1.54 epoch= 352, mse_train=0.58, mse_val=1.55 epoch= 353, mse_train=0.58, mse_val=1.55 epoch= 354, mse_train=0.58, mse_val=1.55 epoch= 355, mse_train=0.58, mse_val=1.55 epoch= 356, mse_train=0.58, mse_val=1.56 epoch= 357, mse_train=0.58, mse_val=1.56 epoch= 358, mse_train=0.58, mse_val=1.56 epoch= 359, mse_train=0.58, mse_val=1.57 epoch= 360, mse_train=0.58, mse_val=1.57 epoch= 361, mse_train=0.58, mse_val=1.57 epoch= 362, mse_train=0.58, mse_val=1.57 epoch= 363, mse_train=0.58, mse_val=1.58 epoch= 364, mse_train=0.58, mse_val=1.58 epoch= 365, mse_train=0.58, mse_val=1.58 epoch= 366, mse_train=0.58, mse_val=1.59 epoch= 367, mse_train=0.58, mse_val=1.59 epoch= 368, mse_train=0.58, mse_val=1.59 epoch= 369, mse_train=0.58, mse_val=1.59 epoch= 370, mse_train=0.57, mse_val=1.60 epoch= 371, mse_train=0.57, mse_val=1.60 epoch= 372, mse_train=0.57, mse_val=1.60 epoch= 373, mse_train=0.57, mse_val=1.61 epoch= 374, mse_train=0.57, mse_val=1.61 epoch= 375, mse_train=0.57, mse_val=1.61 epoch= 376, mse_train=0.57, mse_val=1.61 epoch= 377, mse_train=0.57, mse_val=1.62 epoch= 378, mse_train=0.57, mse_val=1.62 epoch= 379, mse_train=0.57, mse_val=1.62 epoch= 380, mse_train=0.57, mse_val=1.63 epoch= 381, mse_train=0.57, mse_val=1.63 epoch= 382, mse_train=0.57, mse_val=1.63 epoch= 383, mse_train=0.57, mse_val=1.64 epoch= 384, mse_train=0.57, mse_val=1.64 epoch= 385, mse_train=0.57, mse_val=1.64 epoch= 386, mse_train=0.57, mse_val=1.64 epoch= 387, mse_train=0.57, mse_val=1.65 epoch= 388, mse_train=0.57, mse_val=1.65 epoch= 389, mse_train=0.57, mse_val=1.65 epoch= 390, mse_train=0.57, mse_val=1.66 epoch= 391, mse_train=0.57, mse_val=1.66 epoch= 392, mse_train=0.57, mse_val=1.66 epoch= 393, mse_train=0.57, mse_val=1.67 epoch= 394, mse_train=0.57, mse_val=1.67 epoch= 395, mse_train=0.56, mse_val=1.67 epoch= 396, mse_train=0.56, mse_val=1.67 epoch= 397, mse_train=0.56, mse_val=1.68 epoch= 398, mse_train=0.56, 
mse_val=1.68 epoch= 399, mse_train=0.56, mse_val=1.68 epoch= 400, mse_train=0.56, mse_val=1.69 epoch= 401, mse_train=0.56, mse_val=1.69 epoch= 402, mse_train=0.56, mse_val=1.69 epoch= 403, mse_train=0.56, mse_val=1.70 epoch= 404, mse_train=0.56, mse_val=1.70 epoch= 405, mse_train=0.56, mse_val=1.70 epoch= 406, mse_train=0.56, mse_val=1.70 epoch= 407, mse_train=0.56, mse_val=1.71 epoch= 408, mse_train=0.56, mse_val=1.71 epoch= 409, mse_train=0.56, mse_val=1.71 epoch= 410, mse_train=0.56, mse_val=1.72 epoch= 411, mse_train=0.56, mse_val=1.72 epoch= 412, mse_train=0.56, mse_val=1.72 epoch= 413, mse_train=0.56, mse_val=1.73 epoch= 414, mse_train=0.56, mse_val=1.73 epoch= 415, mse_train=0.56, mse_val=1.73 epoch= 416, mse_train=0.56, mse_val=1.74 epoch= 417, mse_train=0.56, mse_val=1.74 epoch= 418, mse_train=0.56, mse_val=1.74 epoch= 419, mse_train=0.56, mse_val=1.74 epoch= 420, mse_train=0.56, mse_val=1.75 epoch= 421, mse_train=0.56, mse_val=1.75 epoch= 422, mse_train=0.56, mse_val=1.75 epoch= 423, mse_train=0.56, mse_val=1.76 epoch= 424, mse_train=0.56, mse_val=1.76 epoch= 425, mse_train=0.55, mse_val=1.76 epoch= 426, mse_train=0.55, mse_val=1.77 epoch= 427, mse_train=0.55, mse_val=1.77 epoch= 428, mse_train=0.55, mse_val=1.77 epoch= 429, mse_train=0.55, mse_val=1.78 epoch= 430, mse_train=0.55, mse_val=1.78 epoch= 431, mse_train=0.55, mse_val=1.78 epoch= 432, mse_train=0.55, mse_val=1.78 epoch= 433, mse_train=0.55, mse_val=1.79 epoch= 434, mse_train=0.55, mse_val=1.79 epoch= 435, mse_train=0.55, mse_val=1.79 epoch= 436, mse_train=0.55, mse_val=1.80 epoch= 437, mse_train=0.55, mse_val=1.80 epoch= 438, mse_train=0.55, mse_val=1.80 epoch= 439, mse_train=0.55, mse_val=1.81 epoch= 440, mse_train=0.55, mse_val=1.81 epoch= 441, mse_train=0.55, mse_val=1.81 epoch= 442, mse_train=0.55, mse_val=1.82 epoch= 443, mse_train=0.55, mse_val=1.82 epoch= 444, mse_train=0.55, mse_val=1.82 epoch= 445, mse_train=0.55, mse_val=1.82 epoch= 446, mse_train=0.55, mse_val=1.83 epoch= 447, 
mse_train=0.55, mse_val=1.83 epoch= 448, mse_train=0.55, mse_val=1.83 epoch= 449, mse_train=0.55, mse_val=1.84 epoch= 450, mse_train=0.55, mse_val=1.84 epoch= 451, mse_train=0.55, mse_val=1.84 epoch= 452, mse_train=0.55, mse_val=1.85 epoch= 453, mse_train=0.55, mse_val=1.85 epoch= 454, mse_train=0.55, mse_val=1.85 epoch= 455, mse_train=0.55, mse_val=1.86 epoch= 456, mse_train=0.55, mse_val=1.86 epoch= 457, mse_train=0.55, mse_val=1.86 epoch= 458, mse_train=0.55, mse_val=1.86 epoch= 459, mse_train=0.55, mse_val=1.87 epoch= 460, mse_train=0.55, mse_val=1.87 epoch= 461, mse_train=0.55, mse_val=1.87 epoch= 462, mse_train=0.55, mse_val=1.88 epoch= 463, mse_train=0.54, mse_val=1.88 epoch= 464, mse_train=0.54, mse_val=1.88 epoch= 465, mse_train=0.54, mse_val=1.89 epoch= 466, mse_train=0.54, mse_val=1.89 epoch= 467, mse_train=0.54, mse_val=1.89 epoch= 468, mse_train=0.54, mse_val=1.89 epoch= 469, mse_train=0.54, mse_val=1.90 epoch= 470, mse_train=0.54, mse_val=1.90 epoch= 471, mse_train=0.54, mse_val=1.90
c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn(
epoch= 472, mse_train=0.54, mse_val=1.91 epoch= 473, mse_train=0.54, mse_val=1.91 epoch= 474, mse_train=0.54, mse_val=1.91 epoch= 475, mse_train=0.54, mse_val=1.92 epoch= 476, mse_train=0.54, mse_val=1.92 epoch= 477, mse_train=0.54, mse_val=1.92 epoch= 478, mse_train=0.54, mse_val=1.92 epoch= 479, mse_train=0.54, mse_val=1.93 epoch= 480, mse_train=0.54, mse_val=1.93 epoch= 481, mse_train=0.54, mse_val=1.93 epoch= 482, mse_train=0.54, mse_val=1.94 epoch= 483, mse_train=0.54, mse_val=1.94 epoch= 484, mse_train=0.54, mse_val=1.94 epoch= 485, mse_train=0.54, mse_val=1.95 epoch= 486, mse_train=0.54, mse_val=1.95 epoch= 487, mse_train=0.54, mse_val=1.95 epoch= 488, mse_train=0.54, mse_val=1.95 epoch= 489, mse_train=0.54, mse_val=1.96 epoch= 490, mse_train=0.54, mse_val=1.96 epoch= 491, mse_train=0.54, mse_val=1.96 epoch= 492, mse_train=0.54, mse_val=1.97 epoch= 493, mse_train=0.54, mse_val=1.97 epoch= 494, mse_train=0.54, mse_val=1.97 epoch= 495, mse_train=0.54, mse_val=1.97 epoch= 496, mse_train=0.54, mse_val=1.98 epoch= 497, mse_train=0.54, mse_val=1.98 epoch= 498, mse_train=0.54, mse_val=1.98 epoch= 499, mse_train=0.54, mse_val=1.99 OK
c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( c:\Users\Bjark\source\repos\MAL\MAL_V11\Lib\site-packages\sklearn\linear_model\_stochastic_gradient.py:1561: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn(
# Run code: Qb(part III)
# Plot the per-epoch training/validation RMSE curves and annotate the best
# model, i.e. the epoch with the lowest validation error from the SGD run.
# NOTE(review): relies on `train_errors`, `val_errors` and `n_epochs` being
# defined by the training loop in an earlier cell — confirm before rerunning
# this cell in isolation.

# Index of the epoch with minimal validation MSE; converted to RMSE for the plot.
best_epoch = np.argmin(val_errors)
best_val_rmse = np.sqrt(val_errors[best_epoch])

plt.figure(figsize=(10,5))
# Arrow from a label placed 1 RMSE-unit above the best point down onto it.
plt.annotate('Best model',
             xy=(best_epoch, best_val_rmse),
             xytext=(best_epoch, best_val_rmse + 1),
             ha="center",
             arrowprops=dict(facecolor='black', shrink=0.05),
             fontsize=16,
             )
# Nudge the dotted reference line slightly below the true minimum so it does
# not overlap the validation curve.
best_val_rmse -= 0.03 # just to make the graph look better
# Horizontal dotted line marking the (nudged) best validation RMSE level.
plt.plot([0, n_epochs], [best_val_rmse, best_val_rmse], "k:", linewidth=2)
# Errors are stored as MSE; take sqrt so both curves are in RMSE units.
plt.plot(np.sqrt(train_errors), "b--", linewidth=2, label="Training set")
plt.plot(np.sqrt(val_errors), "g-", linewidth=3, label="Validation set")
plt.legend(loc="upper right", fontsize=14)
plt.xlabel("Epoch", fontsize=14)
plt.ylabel("RMSE", fontsize=14)
plt.show()
print('OK')
OK
Qc) Early Stopping¶
Description
How would you implement ___early stopping___ in the code above? Write an explanation of the early stopping concept — that is, just write some pseudo code that 'implements' early stopping.
OPTIONAL: also implement your early stopping pseudo code in Python, and get it to work with the code above (and not just flipping the hyperparameter to early_stopping=True on the SGDRegressor).
To implement early stopping I would, before the for loop, add two variables: one for the best model and one for the best validation error. Inside the for loop I would check whether this iteration's validation error is better than (i.e. less than) the best validation error seen so far, and if so record it and snapshot the model. I would then add a check: when this iteration's validation error becomes larger than the best value, break out of the loop.
# Pseudo code (answer to Qc): early stopping wrapped around the SGD training loop.
# NOTE(review): illustrative pseudo code — `val_error`, `sgd_reg`, `clone` and
# `epoch` come from the training loop this is meant to sit inside, and the
# `break` is only valid once placed inside that loop.

# Track the best validation error seen so far and a snapshot of that model.
best_val_error = float('inf')
best_model = None

#For loop here
# Improvement: remember the new best validation error and copy the model.
if val_error < best_val_error:
    best_val_error = val_error
    best_model = clone(sgd_reg)

# Check for early stopping condition
# Degradation: validation error rose above the best seen, so stop training.
if val_error > best_val_error:
    print(f"Early stopping at epoch {epoch}")
    break
Qd) Explain the Polynomial RMSE-Capacity plot¶
Description
Now we revisit the concepts from the `capacity_under_overfitting.ipynb` notebook and the polynomial fitting with a given capacity (polynomial degree). Peek into the cell below (code similar to what we saw in capacity_under_overfitting.ipynb), and explain the generated RMSE-Capacity plot. Why does the training error keep dropping, while the CV-error drops until around capacity 3, and then begins to rise again?
What does the x-axis Capacity and y-axis RMSE represent?
Try increasing the model capacity. What happens when you do plots for degrees larger than around 10? Relate this with what you found via Qa+b in capacity_under_overfitting.ipynb.
# Run and review this code
# NOTE: modified code from [GITHOML], 04_training_linear_models.ipynb
%matplotlib inline
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
def true_fun(X):
    """Noise-free ground-truth target: cos(1.5 * pi * X)."""
    angle = 1.5 * np.pi * X
    return np.cos(angle)
def GenerateData():
    """Build a noisy 1-D regression sample plus the capacities to sweep.

    Returns a tuple (X, y, degrees) where X is a sorted array of 30
    uniform-random points in [0, 1), y is true_fun(X) plus Gaussian noise
    (std 0.1), and degrees is the range of polynomial degrees to evaluate.
    Uses the global numpy RNG state (seed it for reproducibility).
    """
    sample_count = 30
    capacity_sweep = range(1, 8)  # originally experimented with [1, 4, 15]
    X = np.sort(np.random.rand(sample_count))
    noise = np.random.randn(sample_count) * 0.1
    y = true_fun(X) + noise
    return X, y, capacity_sweep
# Sweep the polynomial degree (model capacity) and record training vs. CV RMSE.
np.random.seed(0)
X, y, degrees = GenerateData()
print("Iterating...degrees=",degrees)

capacities, rmses_training, rmses_validation = [], [], []
Z = X[:, np.newaxis]  # sklearn expects a 2-D design matrix
for d in degrees:
    # Polynomial feature expansion followed by an ordinary least-squares fit.
    pipeline = Pipeline([
        ("polynomial_features", PolynomialFeatures(degree=d, include_bias=False)),
        ("linear_regression", LinearRegression()),
    ])
    pipeline.fit(Z, y)

    # Training error: evaluated on the same data the model was fit on.
    rmse_training = sqrt(mean_squared_error(y, pipeline.predict(Z)))

    # Validation error: 10-fold cross-validation (sklearn returns negated MSE).
    scores = cross_val_score(pipeline, Z, y, scoring="neg_mean_squared_error", cv=10)
    rmse_validation = sqrt(-scores.mean())

    print(f" degree={d:4d}, rmse_training={rmse_training:4.2f}, rmse_cv={rmse_validation:4.2f}")
    capacities.append(d)
    rmses_training.append(rmse_training)
    rmses_validation.append(rmse_validation)

# RMSE-vs-capacity plot: training error keeps falling with capacity,
# while the CV error eventually turns upward as the model overfits.
plt.figure(figsize=(7,4))
plt.plot(capacities, rmses_training, "b--", linewidth=2, label="training RMSE")
plt.plot(capacities, rmses_validation,"g-", linewidth=2, label="validation RMSE")
plt.legend(loc="upper right", fontsize=14)
plt.xlabel("Capacity", fontsize=14)
plt.ylabel("RMSE", fontsize=14)
plt.show()
print('OK')
SWMAL Exercise¶
When instantiating a Scikit-learn model in python most or all constructor parameters have default values. These values are not part of the internal model and are hence called hyperparameters---in contrast to normal model parameters, for example the neuron weights, $\mathbf w$, for an MLP model.
Manual Tuning Hyperparameters¶
Below is an example of the python constructor for the support-vector classifier sklearn.svm.SVC, with say the kernel hyperparameter having the default value 'rbf'. If you should choose, what would you set it to other than 'rbf'?
class sklearn.svm.SVC(
C=1.0,
kernel='rbf',
degree=3,
gamma='auto_deprecated',
coef0=0.0,
shrinking=True,
probability=False,
tol=0.001,
cache_size=200,
class_weight=None,
verbose=False,
max_iter=-1,
decision_function_shape='ovr',
random_state=None
)
The default values might be a sensible general starting point, but for your data, you might want to optimize the hyperparameters to yield a better result.
To be able to set kernel to a sensible value you need to go into the documentation for the SVC and understand what the kernel parameter represents, and what values it can be set to, and you need to understand the consequences of setting kernel to something different than the default...and the story repeats for every other hyperparameter!
Brute Force Search¶
An alternative to this structured, but time-consuming approach, is just to brute-force a search of interesting hyperparameters, and choose the 'best' parameters according to a fit-predict and some score, say 'f1'.
Now, you just pick out some hyperparameters, that you figure are important, set them to a suitable range, say
'kernel':('linear', 'rbf'),
'C':[1, 10]
and fire up a full (grid) search on this hyperparameter set, that will try out all your specified combination of kernel and C for the model, and then prints the hyperparameter set with the highest score...
The demo code below sets up some of our well known 'hello-world' data and then run a grid search on a particular model, here a support-vector classifier (SVC)
Other models and datasets ('mnist', 'iris', 'moon') can also be examined.
Qa Explain GridSearchCV¶
Description
There are two code cells below: 1) function setup, 2) the actual grid-search.Review the code cells and write a short summary. Mainly focus on cell 2, but dig into cell 1 if you find it interesting (notice the use of local-function, a nifty feature in python).
In detail, examine the lines:
grid_tuned = GridSearchCV(model, tuning_parameters, ..
grid_tuned.fit(X_train, y_train)
..
FullReport(grid_tuned , X_test, y_test, time_gridsearch)
and write a short description of how the GridSeachCV works: explain how the search parameter set is created and the overall search mechanism is functioning (without going into too much detail).
What role does the parameter scoring='f1_micro' play in the GridSearchCV, and what does n_jobs=-1 mean?
Start with setting up the data with the function LoadAndSetupData('iris'), which loads a dataset based on a string. It can load the different datasets 'iris', 'moon', or 'mnist'. Inside the function it checks that the data is valid and splits it into a training and a test set.
Afterwards we set up the search parameters as a support vector machine classifier with the value gamma=0.001. The gamma value influences the decision boundary: a lower gamma gives a smoother decision boundary that is less sensitive to individual data points.
We specify some of the hyperparameters for the (full) grid search. Here we try out all the specified combinations of kernel and C for the model, and then print the hyperparameter set with the highest score. kernel specifies that the grid search will use two different kernel types; `rbf` stands for radial basis function. `C` defines the values for the regularization parameter. The regularization is used to avoid overfitting, where the model learns the training data too well and struggles to generalize to new, unseen data.
The setting CV specifies the number of cross-validation folds. VERBOSE controls how much of the search progress is printed; verbose=0 shows nothing (silent).
GridSearchCV(model, tuning_parameters, cv=CV, scoring='f1_micro', verbose=VERBOSE, n_jobs=-1) creates a grid-search object, and its inputs are the parameters explained above. It uses f1_micro as the score: the F1 score is computed globally by counting the total true positives, false negatives and false positives across all classes, giving equal weight to each sample. The last parameter, n_jobs=-1, makes it use all available processors for parallel computing.
from time import time
import numpy as np
import sys
from sklearn import svm
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, train_test_split
from sklearn.metrics import classification_report, f1_score
from sklearn import datasets
import sys,os
sys.path.append(os.path.expanduser('../'))
from libitmal import dataloaders as itmaldataloaders # Needed for load of iris, moon and mnist
currmode="N/A" # GLOBAL var! -- set by LoadAndSetupData, read by SearchReport's summary line

def SearchReport(model):
    # Summarize a fitted GridSearchCV/RandomizedSearchCV object: prints the
    # best parameters/score, a constructor-style string for the best
    # estimator, and (when available) the per-candidate CV scores.
    #
    # Returns (summary_string, best_estimator).
    # Raises AssertionError unless the search used scoring='f1_micro'.

    def GetBestModelCTOR(model, best_params):
        # Build a "ClassName(key=value,...)" string from the best-parameter dict.
        def GetParams(best_params):
            ret_str=""
            for key in sorted(best_params):
                value = best_params[key]
                # quote string values so the output reads like valid Python
                temp_str = "'" if str(type(value))=="<class 'str'>" else ""
                if len(ret_str)>0:
                    ret_str += ','
                ret_str += f'{key}={temp_str}{value}{temp_str}'
            return ret_str
        try:
            param_str = GetParams(best_params)
            return type(model).__name__ + '(' + param_str + ')'
        except:
            # best-effort: fall back to a placeholder rather than failing the report
            return "N/A(1)"

    print("\nBest model set found on train set:")
    print()
    print(f"\tbest parameters={model.best_params_}")
    print(f"\tbest '{model.scoring}' score={model.best_score_}")
    print(f"\tbest index={model.best_index_}")
    print()
    print(f"Best estimator CTOR:")
    print(f"\t{model.best_estimator_}")
    print()
    try:
        # per-candidate mean/std scores from the CV results table
        print(f"Grid scores ('{model.scoring}') on development set:")
        means = model.cv_results_['mean_test_score']
        stds = model.cv_results_['std_test_score']
        i=0
        for mean, std, params in zip(means, stds, model.cv_results_['params']):
            print("\t[%2d]: %0.3f (+/-%0.03f) for %r" % (i, mean, std * 2, params))
            i += 1
    except:
        # some search types may not expose these cv_results_ fields
        print("WARNING: the random search do not provide means/stds")

    global currmode
    # fixed scoring makes results comparable across the notebook's searches
    assert "f1_micro"==str(model.scoring), f"come on, we need to fix the scoring to be able to compare model-fits! Your scoreing={str(model.scoring)}...remember to add scoring='f1_micro' to the search"
    return f"best: dat={currmode}, score={model.best_score_:0.5f}, model={GetBestModelCTOR(model.estimator,model.best_params_)}", model.best_estimator_
def ClassificationReport(model, X_test, y_test, target_names=None):
    """Predict on the held-out test set and print a sklearn classification report."""
    assert X_test.shape[0]==y_test.shape[0]
    header = (
        "\nDetailed classification report:",
        "\tThe model is trained on the full development set.",
        "\tThe scores are computed on the full evaluation set.",
    )
    for line in header:
        print(line)
    print()
    predictions = model.predict(X_test)
    print(classification_report(y_test, predictions, target_names=target_names))
    print()
def FullReport(model, X_test, y_test, t):
    """Print timing, search summary and classification report for a fitted search.

    Returns (summary_string, best_estimator) as produced by SearchReport.
    """
    print(f"SEARCH TIME: {t:0.2f} sec")
    summary, best = SearchReport(model)
    ClassificationReport(model, X_test, y_test)
    print(f"CTOR for best model: {best}\n")
    print(f"{summary}\n")
    return summary, best
def LoadAndSetupData(mode, test_size=0.3):
    # Load one of the supported datasets ('moon', 'mnist' or 'iris') via the
    # libitmal dataloaders, sanity-check its shape, and split it into
    # train/test parts.
    #
    # Returns (X_train, X_test, y_train, y_test).
    # Raises ValueError for an unknown mode string.
    assert test_size>=0.0 and test_size<=1.0

    def ShapeToString(Z):
        # Pretty-print a numpy shape as "(  d0;  d1; ...)" with 5-wide fields.
        n = Z.ndim
        s = "("
        for i in range(n):
            s += f"{Z.shape[i]:5d}"
            if i+1!=n:
                s += ";"
        return s+")"

    global currmode
    currmode=mode  # remember dataset name for SearchReport's summary line
    print(f"DATA: {currmode}..")

    if mode=='moon':
        X, y = itmaldataloaders.MOON_GetDataSet(n_samples=5000, noise=0.2)
        itmaldataloaders.MOON_Plot(X, y)
    elif mode=='mnist':
        X, y = itmaldataloaders.MNIST_GetDataSet(load_mode=0)
        if X.ndim==3:
            # flatten per-sample image planes to one feature vector per row
            X=np.reshape(X, (X.shape[0], -1))
    elif mode=='iris':
        X, y = itmaldataloaders.IRIS_GetDataSet()
    else:
        raise ValueError(f"could not load data for that particular mode='{mode}', only 'moon'/'mnist'/'iris' supported")

    print(f' org. data: X.shape ={ShapeToString(X)}, y.shape ={ShapeToString(y)}')

    # sanity checks: 2-D features, matching sample counts, 1-D labels
    assert X.ndim==2
    assert X.shape[0]==y.shape[0]
    assert y.ndim==1 or (y.ndim==2 and y.shape[1]==0)

    # fixed random_state so every notebook run gets the same split
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=0, shuffle=True
    )

    print(f' train data: X_train.shape={ShapeToString(X_train)}, y_train.shape={ShapeToString(y_train)}')
    print(f' test data: X_test.shape ={ShapeToString(X_test)}, y_test.shape ={ShapeToString(y_test)}')
    print()

    return X_train, X_test, y_train, y_test
def TryKerasImport(verbose=True):
    """Probe whether 'keras' or 'tensorflow.keras' can be imported.

    Returns True if at least one of the two imports succeeds; when both fail
    and verbose is set, a warning per failed import is written to stderr.
    """
    status = {}
    for module_name in ("keras", "tensorflow.keras"):
        try:
            __import__(module_name)
            status[module_name] = True
        except:
            status[module_name] = False
    ok = status["keras"] or status["tensorflow.keras"]
    if not ok and verbose:
        # report exactly which of the two imports failed
        if not status["keras"]:
            print("WARNING: importing 'keras' failed", file=sys.stderr)
        if not status["tensorflow.keras"]:
            print("WARNING: importing 'tensorflow.keras' failed", file=sys.stderr)
    return ok

print(f"OK(function setup" + ("" if TryKerasImport() else ", hope MNIST loads works because it seems you miss the installation of Keras or Tensorflow!") + ")")
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/urllib3/__init__.py:34: NotOpenSSLWarning: urllib3 v2.0 only supports OpenSSL 1.1.1+, currently the 'ssl' module is compiled with 'LibreSSL 2.8.3'. See: https://github.com/urllib3/urllib3/issues/3020 warnings.warn(
OK(function setup)
# Setup data
X_train, X_test, y_train, y_test = LoadAndSetupData(
    'iris') # 'iris', 'moon', or 'mnist'

# Setup search parameters
model = svm.SVC(
    gamma=0.001
) # NOTE: gamma="scale" does *not work in older Scikit-learn frameworks,
# FIX: replace with model = svm.SVC(gamma=0.001)

# Grid to sweep: 2 kernels x 3 C values = 6 candidate models
tuning_parameters = {
    'kernel': ('linear', 'rbf'),
    'C': [0.1, 1, 10]
}

CV = 5        # number of cross-validation folds
VERBOSE = 0   # 0: silent; higher values print search progress

# Run GridSearchCV for the model
grid_tuned = GridSearchCV(model,
                          tuning_parameters,
                          cv=CV,
                          scoring='f1_micro',  # fixed scoring so SearchReport can compare runs
                          verbose=VERBOSE,
                          n_jobs=-1)           # use all available CPU cores
start = time()
grid_tuned.fit(X_train, y_train)
t = time() - start

# Report result
b0, m0 = FullReport(grid_tuned, X_test, y_test, t)
print('OK(grid-search)')
DATA: iris..
org. data: X.shape =( 150; 4), y.shape =( 150)
train data: X_train.shape=( 105; 4), y_train.shape=( 105)
test data: X_test.shape =( 45; 4), y_test.shape =( 45)
SEARCH TIME: 0.92 sec
Best model set found on train set:
best parameters={'C': 1, 'kernel': 'linear'}
best 'f1_micro' score=0.9714285714285715
best index=2
Best estimator CTOR:
SVC(C=1, gamma=0.001, kernel='linear')
Grid scores ('f1_micro') on development set:
[ 0]: 0.962 (+/-0.093) for {'C': 0.1, 'kernel': 'linear'}
[ 1]: 0.371 (+/-0.038) for {'C': 0.1, 'kernel': 'rbf'}
[ 2]: 0.971 (+/-0.047) for {'C': 1, 'kernel': 'linear'}
[ 3]: 0.695 (+/-0.047) for {'C': 1, 'kernel': 'rbf'}
[ 4]: 0.952 (+/-0.085) for {'C': 10, 'kernel': 'linear'}
[ 5]: 0.924 (+/-0.097) for {'C': 10, 'kernel': 'rbf'}
Detailed classification report:
The model is trained on the full development set.
The scores are computed on the full evaluation set.
precision recall f1-score support
0 1.00 1.00 1.00 16
1 1.00 0.94 0.97 18
2 0.92 1.00 0.96 11
accuracy 0.98 45
macro avg 0.97 0.98 0.98 45
weighted avg 0.98 0.98 0.98 45
CTOR for best model: SVC(C=1, gamma=0.001, kernel='linear')
best: dat=iris, score=0.97143, model=SVC(C=1,kernel='linear')
OK(grid-search)
Qb Hyperparameter Grid Search using an SDG classifier¶
Description
Now, replace the `svm.SVC` model with an `SGDClassifier` and a suitable set of the hyperparameters for that model.You need at least four or five different hyperparameters from the SGDClassifier in the search-space before it begins to take considerable compute time doing the full grid search.
So, repeat the search with the SGDClassifier, and be sure to add enough hyperparameters to the grid-search, such that the search takes a considerable time to run, that is a couple of minutes or up to some hours..
We have now replaced the svm.SVC model with an SGDClassifier and changed the hyperparameters for the model. We now try out different loss functions. We also set a maximum number of iterations and a regularization strength to reduce overfitting, at the cost of a less flexible model. eta0 is the initial learning rate, and penalty determines the type of regularization.
tuning_parameters = {
'loss': ['hinge', 'squared_hinge', 'perceptron'],
'max_iter': [300, 900, 1400],
'alpha': [0.0001, 0.001, 0.01],
'eta0': [0.01, 0.1],
'penalty': ['l1', 'l2'],
'learning_rate': ['constant', 'optimal', 'invscaling'],
'power_t': [0.1, 0.5],
'early_stopping': [False],
'validation_fraction': [0.1, 0.2],
'n_iter_no_change': [5, 10, 20],
}
As we can observe, the best parameters were
Best estimator CTOR:
SGDClassifier(alpha=0.001, eta0=0.01, loss='perceptron', max_iter=300,
penalty='l1', random_state=42)
from sklearn.linear_model import SGDClassifier
# https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.SGDClassifier.html

# Setup data
X_train, X_test, y_train, y_test = LoadAndSetupData(
    'iris') # 'iris', 'moon', or 'mnist'

# Setup search parameters
model = SGDClassifier(
    loss='hinge',     # placeholder; the grid below overrides it
    random_state=42   # fixed seed for reproducible stochastic fits
)

# NOTE(review): 3*3*3*2*2*3*2*1*2*3 = 3888 parameter combinations, times
# CV=5 folds => ~19k fits; this is what makes the search take a while.
tuning_parameters = {
    'loss': ['hinge', 'squared_hinge', 'perceptron'],
    'max_iter': [300, 900, 1400],
    'alpha': [0.0001, 0.001, 0.01],                        # regularization strength
    'eta0': [0.01, 0.1],                                   # initial learning rate
    'penalty': ['l1', 'l2'],
    'learning_rate': ['constant', 'optimal', 'invscaling'],
    'power_t': [0.1, 0.5],                                 # exponent used by invscaling
    'early_stopping': [False],
    'validation_fraction': [0.1, 0.2],                     # only used when early_stopping=True
    'n_iter_no_change': [5, 10, 20],
}

CV = 5        # number of cross-validation folds
VERBOSE = 0   # 0: silent; higher values print search progress

# Run GridSearchCV for the model
grid_tuned = GridSearchCV(model,
                          tuning_parameters,
                          cv=CV,
                          scoring='f1_micro',  # fixed scoring so SearchReport can compare runs
                          verbose=VERBOSE,
                          n_jobs=-1)           # use all available CPU cores
start = time()
grid_tuned.fit(X_train, y_train)
t = time() - start

# Report result
b0, m0 = FullReport(grid_tuned, X_test, y_test, t)
print('OK(grid-search)')
DATA: iris.. org. data: X.shape =( 150; 4), y.shape =( 150) train data: X_train.shape=( 105; 4), y_train.shape=( 105) test data: X_test.shape =( 45; 4), y_test.shape =( 45)
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. 
Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. 
warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn( /Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/linear_model/_stochastic_gradient.py:713: ConvergenceWarning: Maximum number of iteration reached before convergence. Consider increasing max_iter to improve the fit. warnings.warn(
SEARCH TIME: 4.83 sec
Best model set found on train set:
best parameters={'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
best 'f1_micro' score=0.9904761904761905
best index=1672
Best estimator CTOR:
SGDClassifier(alpha=0.001, eta0=0.01, loss='perceptron', max_iter=300,
n_iter_no_change=20, penalty='l1', power_t=0.1, random_state=42)
Grid scores ('f1_micro') on development set:
[ 0]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[ 1]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[ 2]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[ 3]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[ 4]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[ 5]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[ 6]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[ 7]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[ 8]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[ 9]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[10]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[11]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[12]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[13]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[14]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[15]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[16]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[17]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[18]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[19]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[20]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[21]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[22]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[23]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[24]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[25]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[26]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[27]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[28]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[29]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[30]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[31]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[32]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[33]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[34]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[35]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[36]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[37]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[38]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[39]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[40]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[41]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[42]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[43]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[44]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[45]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[46]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[47]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[48]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[49]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[50]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[51]: 0.905 (+/-0.085) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[52]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[53]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[54]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[55]: 0.886 (+/-0.238) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[56]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[57]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[58]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[59]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[60]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[61]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[62]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[63]: 0.924 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[64]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[65]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[66]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[67]: 0.876 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[68]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[69]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[70]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[71]: 0.905 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[72]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[73]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[74]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[75]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[76]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[77]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[78]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[79]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[80]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[81]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[82]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[83]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[84]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[85]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[86]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[87]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[88]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[89]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[90]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[91]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[92]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[93]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[94]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[95]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[96]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[97]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[98]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[99]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[100]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[101]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[102]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[103]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[104]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[105]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[106]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[107]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[108]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[109]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[110]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[111]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[112]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[113]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[114]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[115]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[116]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[117]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[118]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[119]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[120]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[121]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[122]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[123]: 0.714 (+/-0.313) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[124]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[125]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[126]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[127]: 0.705 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[128]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[129]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[130]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[131]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[132]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[133]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[134]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[135]: 0.848 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[136]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[137]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[138]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[139]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[140]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[141]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[142]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[143]: 0.829 (+/-0.273) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[144]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[145]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[146]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[147]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[148]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[149]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[150]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[151]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[152]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[153]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[154]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[155]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[156]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[157]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[158]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[159]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[160]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[161]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[162]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[163]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[164]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[165]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[166]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[167]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[168]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[169]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[170]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[171]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[172]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[173]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[174]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[175]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[176]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[177]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[178]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[179]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[180]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[181]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[182]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[183]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[184]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[185]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[186]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[187]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[188]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[189]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[190]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[191]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[192]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[193]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[194]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[195]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[196]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[197]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[198]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[199]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[200]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[201]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[202]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[203]: 0.781 (+/-0.214) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[204]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[205]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[206]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[207]: 0.743 (+/-0.461) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[208]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[209]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[210]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[211]: 0.686 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[212]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[213]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[214]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[215]: 0.829 (+/-0.286) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[216]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[217]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[218]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[219]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[220]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[221]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[222]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[223]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[224]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[225]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[226]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[227]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[228]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[229]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[230]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[231]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[232]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[233]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[234]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[235]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[236]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[237]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[238]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[239]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[240]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[241]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[242]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[243]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[244]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[245]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[246]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[247]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[248]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[249]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[250]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[251]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[252]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[253]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[254]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[255]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[256]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[257]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[258]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[259]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[260]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[261]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[262]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[263]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[264]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[265]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[266]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[267]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[268]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[269]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[270]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[271]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[272]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[273]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[274]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[275]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[276]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[277]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[278]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[279]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[280]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[281]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[282]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[283]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[284]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[285]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[286]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[287]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[288]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[289]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[290]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[291]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[292]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[293]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[294]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[295]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[296]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[297]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[298]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[299]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[300]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[301]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[302]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[303]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[304]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[305]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[306]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[307]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[308]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[309]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[310]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[311]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[312]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[313]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[314]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[315]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[316]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[317]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[318]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[319]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[320]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[321]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[322]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[323]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[324]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[325]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[326]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[327]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[328]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[329]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[330]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[331]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[332]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[333]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[334]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[335]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[336]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[337]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[338]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[339]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[340]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[341]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[342]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[343]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[344]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[345]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[346]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[347]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[348]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[349]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[350]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[351]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[352]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[353]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[354]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[355]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[356]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[357]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[358]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[359]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[360]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[361]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[362]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[363]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[364]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[365]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[366]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[367]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[368]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[369]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[370]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[371]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[372]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[373]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[374]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[375]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[376]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[377]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[378]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[379]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[380]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[381]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[382]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[383]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[384]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[385]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[386]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[387]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[388]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[389]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[390]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[391]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[392]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[393]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[394]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[395]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[396]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[397]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[398]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[399]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[400]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[401]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[402]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[403]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[404]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[405]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[406]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[407]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[408]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[409]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[410]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[411]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[412]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[413]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[414]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[415]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[416]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[417]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[418]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[419]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[420]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[421]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[422]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[423]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[424]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[425]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[426]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[427]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[428]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[429]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[430]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[431]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[432]: 0.933 (+/-0.114) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[433]: 0.933 (+/-0.114) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[434]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[435]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[436]: 0.914 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[437]: 0.914 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[438]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[439]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[440]: 0.886 (+/-0.222) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[441]: 0.886 (+/-0.222) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[442]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[443]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[444]: 0.924 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[445]: 0.924 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[446]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[447]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[448]: 0.905 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[449]: 0.905 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[450]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[451]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[452]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[453]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[454]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[455]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[456]: 0.933 (+/-0.114) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[457]: 0.933 (+/-0.114) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[458]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[459]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[460]: 0.914 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[461]: 0.914 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[462]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[463]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[464]: 0.886 (+/-0.222) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[465]: 0.886 (+/-0.222) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[466]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[467]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[468]: 0.924 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[469]: 0.924 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[470]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[471]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[472]: 0.905 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[473]: 0.905 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[474]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[475]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[476]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[477]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[478]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[479]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[480]: 0.933 (+/-0.114) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[481]: 0.933 (+/-0.114) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[482]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[483]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[484]: 0.914 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[485]: 0.914 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[486]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[487]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[488]: 0.886 (+/-0.222) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[489]: 0.886 (+/-0.222) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[490]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[491]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[492]: 0.924 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[493]: 0.924 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[494]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[495]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[496]: 0.905 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[497]: 0.905 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[498]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[499]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[500]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[501]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[502]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[503]: 0.695 (+/-0.047) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[504]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[505]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[506]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[507]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[508]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[509]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[510]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[511]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[512]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[513]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[514]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[515]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[516]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[517]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[518]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[519]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[520]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[521]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[522]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[523]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[524]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[525]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[526]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[527]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[528]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[529]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[530]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[531]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[532]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[533]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[534]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[535]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[536]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[537]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[538]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[539]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[540]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[541]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[542]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[543]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[544]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[545]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[546]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[547]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[548]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[549]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[550]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[551]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[552]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[553]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[554]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[555]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[556]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[557]: 0.952 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[558]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[559]: 0.848 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[560]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[561]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[562]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[563]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[564]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[565]: 0.914 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[566]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[567]: 0.867 (+/-0.126) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[568]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[569]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[570]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[571]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[572]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[573]: 0.867 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[574]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[575]: 0.857 (+/-0.104) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[576]: 0.848 (+/-0.279) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[577]: 0.848 (+/-0.279) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[578]: 0.914 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[579]: 0.914 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[580]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[581]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[582]: 0.876 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[583]: 0.876 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[584]: 0.771 (+/-0.440) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[585]: 0.771 (+/-0.440) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[586]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[587]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[588]: 0.667 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[589]: 0.667 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[590]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[591]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[592]: 0.695 (+/-0.506) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[593]: 0.695 (+/-0.506) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[594]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[595]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[596]: 0.743 (+/-0.374) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[597]: 0.743 (+/-0.374) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[598]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[599]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[600]: 0.848 (+/-0.279) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[601]: 0.848 (+/-0.279) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[602]: 0.914 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[603]: 0.914 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[604]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[605]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[606]: 0.876 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[607]: 0.876 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[608]: 0.771 (+/-0.440) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[609]: 0.771 (+/-0.440) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[610]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[611]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[612]: 0.667 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[613]: 0.667 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[614]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[615]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[616]: 0.695 (+/-0.506) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[617]: 0.695 (+/-0.506) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[618]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[619]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[620]: 0.743 (+/-0.374) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[621]: 0.743 (+/-0.374) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[622]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[623]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[624]: 0.848 (+/-0.279) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[625]: 0.848 (+/-0.279) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[626]: 0.914 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[627]: 0.914 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[628]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[629]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[630]: 0.876 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[631]: 0.876 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[632]: 0.771 (+/-0.440) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[633]: 0.771 (+/-0.440) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[634]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[635]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[636]: 0.667 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[637]: 0.667 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[638]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[639]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[640]: 0.695 (+/-0.506) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[641]: 0.695 (+/-0.506) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[642]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[643]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[644]: 0.743 (+/-0.374) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[645]: 0.743 (+/-0.374) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[646]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[647]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[648]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[649]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[650]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[651]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[652]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[653]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[654]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[655]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[656]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[657]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[658]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[659]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[660]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[661]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[662]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[663]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[664]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[665]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[666]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[667]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[668]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[669]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[670]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[671]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[672]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[673]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[674]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[675]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[676]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[677]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[678]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[679]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[680]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[681]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[682]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[683]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[684]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[685]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[686]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[687]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[688]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[689]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[690]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[691]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[692]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[693]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[694]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[695]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[696]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[697]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[698]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[699]: 0.781 (+/-0.333) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[700]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[701]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[702]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[703]: 0.857 (+/-0.248) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[704]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[705]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[706]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[707]: 0.819 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[708]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[709]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[710]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[711]: 0.867 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[712]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[713]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[714]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[715]: 0.752 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[716]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[717]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[718]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[719]: 0.781 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[720]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[721]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[722]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[723]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[724]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[725]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[726]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[727]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[728]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[729]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[730]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[731]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[732]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[733]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[734]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[735]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[736]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[737]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[738]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[739]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[740]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[741]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[742]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[743]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[744]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[745]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[746]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[747]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[748]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[749]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[750]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[751]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[752]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[753]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[754]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[755]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[756]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[757]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[758]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[759]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[760]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[761]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[762]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[763]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[764]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[765]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[766]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[767]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[768]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[769]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[770]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[771]: 0.705 (+/-0.392) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[772]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[773]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[774]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[775]: 0.724 (+/-0.378) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[776]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[777]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[778]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[779]: 0.638 (+/-0.260) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[780]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[781]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[782]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[783]: 0.810 (+/-0.233) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[784]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[785]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[786]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[787]: 0.752 (+/-0.220) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[788]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[789]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[790]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[791]: 0.781 (+/-0.453) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[792]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[793]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[794]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[795]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[796]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[797]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[798]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[799]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[800]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[801]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[802]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[803]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[804]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[805]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[806]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[807]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[808]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[809]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[810]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[811]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[812]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[813]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[814]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[815]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[816]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[817]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[818]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[819]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[820]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[821]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[822]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[823]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[824]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[825]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[826]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[827]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[828]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[829]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[830]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[831]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[832]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[833]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[834]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[835]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[836]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[837]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[838]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[839]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[840]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[841]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[842]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[843]: 0.781 (+/-0.364) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[844]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[845]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[846]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[847]: 0.933 (+/-0.076) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[848]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[849]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[850]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[851]: 0.790 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[852]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[853]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[854]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[855]: 0.667 (+/-0.395) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[856]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[857]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[858]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[859]: 0.686 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[860]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[861]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[862]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[863]: 0.733 (+/-0.322) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[864]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[865]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[866]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[867]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[868]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[869]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[870]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[871]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[872]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[873]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[874]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[875]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[876]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[877]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[878]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[879]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[880]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[881]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[882]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[883]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[884]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[885]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[886]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[887]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[888]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[889]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[890]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[891]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[892]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[893]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[894]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[895]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[896]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[897]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[898]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[899]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[900]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[901]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[902]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[903]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[904]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[905]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[906]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[907]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[908]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[909]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[910]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[911]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[912]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[913]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[914]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[915]: 0.819 (+/-0.304) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[916]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[917]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[918]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[919]: 0.895 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[920]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[921]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[922]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[923]: 0.895 (+/-0.185) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[924]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[925]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[926]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[927]: 0.790 (+/-0.230) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[928]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[929]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[930]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[931]: 0.962 (+/-0.038) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[932]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[933]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[934]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[935]: 0.886 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[936]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[937]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[938]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[939]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[940]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[941]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[942]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[943]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[944]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[945]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[946]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[947]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[948]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[949]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[950]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[951]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[952]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[953]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[954]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[955]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[956]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[957]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[958]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[959]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[960]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[961]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[962]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[963]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[964]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[965]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[966]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[967]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[968]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[969]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[970]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[971]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[972]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[973]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[974]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[975]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[976]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[977]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[978]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[979]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[980]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[981]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[982]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[983]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[984]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[985]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[986]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[987]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[988]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[989]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[990]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[991]: 0.857 (+/-0.295) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[992]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[993]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[994]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[995]: 0.943 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[996]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[997]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[998]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[999]: 0.810 (+/-0.181) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1000]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1001]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1002]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1003]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1004]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1005]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1006]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1007]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1008]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1009]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1010]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1011]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1012]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1013]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1014]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1015]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1016]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1017]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1018]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1019]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1020]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1021]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1022]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1023]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1024]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1025]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1026]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1027]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1028]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1029]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1030]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1031]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1032]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1033]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1034]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1035]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1036]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1037]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1038]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1039]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1040]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1041]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1042]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1043]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1044]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1045]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1046]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1047]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1048]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1049]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1050]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1051]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1052]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1053]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1054]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1055]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1056]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1057]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1058]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1059]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1060]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1061]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1062]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1063]: 0.762 (+/-0.508) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1064]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1065]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1066]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1067]: 0.895 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1068]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1069]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1070]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1071]: 0.800 (+/-0.212) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1072]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1073]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1074]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1075]: 0.952 (+/-0.060) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1076]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1077]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1078]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1079]: 0.876 (+/-0.205) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1080]: 0.933 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1081]: 0.933 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1082]: 0.810 (+/-0.120) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1083]: 0.810 (+/-0.120) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1084]: 0.819 (+/-0.251) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1085]: 0.819 (+/-0.251) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1086]: 0.829 (+/-0.155) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1087]: 0.829 (+/-0.155) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1088]: 0.876 (+/-0.129) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1089]: 0.876 (+/-0.129) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1090]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1091]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1092]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1093]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1094]: 0.895 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1095]: 0.895 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1096]: 0.905 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1097]: 0.905 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1098]: 0.886 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1099]: 0.886 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1100]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1101]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1102]: 0.876 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1103]: 0.876 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1104]: 0.933 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1105]: 0.933 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1106]: 0.810 (+/-0.120) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1107]: 0.810 (+/-0.120) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1108]: 0.819 (+/-0.251) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1109]: 0.819 (+/-0.251) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1110]: 0.829 (+/-0.155) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1111]: 0.829 (+/-0.155) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1112]: 0.876 (+/-0.129) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1113]: 0.876 (+/-0.129) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1114]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1115]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1116]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1117]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1118]: 0.895 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1119]: 0.895 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1120]: 0.905 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1121]: 0.905 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1122]: 0.886 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1123]: 0.886 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1124]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1125]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1126]: 0.876 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1127]: 0.876 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1128]: 0.933 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1129]: 0.933 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1130]: 0.810 (+/-0.120) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1131]: 0.810 (+/-0.120) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1132]: 0.819 (+/-0.251) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1133]: 0.819 (+/-0.251) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1134]: 0.829 (+/-0.155) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1135]: 0.829 (+/-0.155) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1136]: 0.876 (+/-0.129) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1137]: 0.876 (+/-0.129) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1138]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1139]: 0.886 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1140]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1141]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1142]: 0.895 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1143]: 0.895 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1144]: 0.905 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1145]: 0.905 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1146]: 0.886 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1147]: 0.886 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1148]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1149]: 0.781 (+/-0.143) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1150]: 0.876 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1151]: 0.876 (+/-0.177) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1152]: 0.581 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1153]: 0.581 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1154]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1155]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1156]: 0.790 (+/-0.328) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1157]: 0.790 (+/-0.328) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1158]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1159]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1160]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1161]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1162]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1163]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1164]: 0.781 (+/-0.166) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1165]: 0.781 (+/-0.166) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1166]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1167]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1168]: 0.667 (+/-0.459) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1169]: 0.667 (+/-0.459) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1170]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1171]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1172]: 0.752 (+/-0.194) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1173]: 0.752 (+/-0.194) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1174]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1175]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1176]: 0.581 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1177]: 0.581 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1178]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1179]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1180]: 0.790 (+/-0.328) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1181]: 0.790 (+/-0.328) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1182]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1183]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1184]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1185]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1186]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1187]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1188]: 0.781 (+/-0.166) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1189]: 0.781 (+/-0.166) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1190]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1191]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1192]: 0.667 (+/-0.459) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1193]: 0.667 (+/-0.459) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1194]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1195]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1196]: 0.752 (+/-0.194) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1197]: 0.752 (+/-0.194) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1198]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1199]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1200]: 0.581 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1201]: 0.581 (+/-0.327) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1202]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1203]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1204]: 0.790 (+/-0.328) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1205]: 0.790 (+/-0.328) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1206]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1207]: 0.867 (+/-0.152) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1208]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1209]: 0.829 (+/-0.245) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1210]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1211]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1212]: 0.781 (+/-0.166) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1213]: 0.781 (+/-0.166) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1214]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1215]: 0.857 (+/-0.170) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1216]: 0.667 (+/-0.459) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1217]: 0.667 (+/-0.459) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1218]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1219]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1220]: 0.752 (+/-0.194) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1221]: 0.752 (+/-0.194) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1222]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1223]: 0.838 (+/-0.196) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1224]: 0.857 (+/-0.289) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1225]: 0.857 (+/-0.289) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1226]: 0.933 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1227]: 0.933 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1228]: 0.800 (+/-0.265) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1229]: 0.800 (+/-0.265) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1230]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1231]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1232]: 0.752 (+/-0.472) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1233]: 0.752 (+/-0.472) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1234]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1235]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1236]: 0.857 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1237]: 0.857 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1238]: 0.943 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1239]: 0.943 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1240]: 0.800 (+/-0.332) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1241]: 0.800 (+/-0.332) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1242]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1243]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1244]: 0.810 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1245]: 0.810 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1246]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1247]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1248]: 0.857 (+/-0.289) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1249]: 0.857 (+/-0.289) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1250]: 0.933 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1251]: 0.933 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1252]: 0.800 (+/-0.265) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1253]: 0.800 (+/-0.265) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1254]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1255]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1256]: 0.752 (+/-0.472) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1257]: 0.752 (+/-0.472) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1258]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1259]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1260]: 0.857 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1261]: 0.857 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1262]: 0.943 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1263]: 0.943 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1264]: 0.800 (+/-0.332) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1265]: 0.800 (+/-0.332) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1266]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1267]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1268]: 0.810 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1269]: 0.810 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1270]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1271]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1272]: 0.857 (+/-0.289) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1273]: 0.857 (+/-0.289) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1274]: 0.933 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1275]: 0.933 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1276]: 0.800 (+/-0.265) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1277]: 0.800 (+/-0.265) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1278]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1279]: 0.914 (+/-0.111) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1280]: 0.752 (+/-0.472) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1281]: 0.752 (+/-0.472) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1282]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1283]: 0.924 (+/-0.097) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1284]: 0.857 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1285]: 0.857 (+/-0.190) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1286]: 0.943 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1287]: 0.943 (+/-0.093) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1288]: 0.800 (+/-0.332) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1289]: 0.800 (+/-0.332) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1290]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1291]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1292]: 0.810 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1293]: 0.810 (+/-0.200) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1294]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1295]: 0.962 (+/-0.071) for {'alpha': 0.0001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1296]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1297]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1298]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1299]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1300]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1301]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1302]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1303]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1304]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1305]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1306]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1307]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1308]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1309]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1310]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1311]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1312]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1313]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1314]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1315]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1316]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1317]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1318]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1319]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1320]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1321]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1322]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1323]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1324]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1325]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1326]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1327]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1328]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1329]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1330]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1331]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1332]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1333]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1334]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1335]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1336]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1337]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1338]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1339]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1340]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1341]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1342]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1343]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1344]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1345]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1346]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1347]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1348]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1349]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1350]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1351]: 0.886 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1352]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1353]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1354]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1355]: 0.943 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1356]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1357]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1358]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1359]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1360]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1361]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1362]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1363]: 0.886 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1364]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1365]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1366]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1367]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1368]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1369]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1370]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1371]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1372]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1373]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1374]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1375]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1376]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1377]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1378]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1379]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1380]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1381]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1382]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1383]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1384]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1385]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1386]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1387]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1388]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1389]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1390]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1391]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1392]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1393]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1394]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1395]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1396]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1397]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1398]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1399]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1400]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1401]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1402]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1403]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1404]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1405]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1406]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1407]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1408]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1409]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1410]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1411]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1412]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1413]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1414]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1415]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1416]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1417]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1418]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1419]: 0.724 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1420]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1421]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1422]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1423]: 0.705 (+/-0.327) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1424]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1425]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1426]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1427]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1428]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1429]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1430]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1431]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1432]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1433]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1434]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1435]: 0.829 (+/-0.273) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1436]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1437]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1438]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1439]: 0.819 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1440]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1441]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1442]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1443]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1444]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1445]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1446]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1447]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1448]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1449]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1450]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1451]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1452]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1453]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1454]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1455]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1456]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1457]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1458]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1459]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1460]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1461]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1462]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1463]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1464]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1465]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1466]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1467]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1468]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1469]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1470]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1471]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1472]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1473]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1474]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1475]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1476]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1477]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1478]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1479]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1480]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1481]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1482]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1483]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1484]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1485]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1486]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1487]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1488]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1489]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1490]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1491]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1492]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1493]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1494]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1495]: 0.933 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1496]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1497]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1498]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1499]: 0.629 (+/-0.251) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1500]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1501]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1502]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1503]: 0.667 (+/-0.395) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1504]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1505]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1506]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1507]: 0.848 (+/-0.236) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1508]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1509]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1510]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1511]: 0.695 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1512]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1513]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1514]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1515]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1516]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1517]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1518]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1519]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1520]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1521]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1522]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1523]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1524]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1525]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1526]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1527]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1528]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1529]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1530]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1531]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1532]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1533]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1534]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1535]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1536]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1537]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1538]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1539]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1540]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1541]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1542]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1543]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1544]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1545]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1546]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1547]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1548]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1549]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1550]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1551]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1552]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1553]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1554]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1555]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1556]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1557]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1558]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1559]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1560]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1561]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1562]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1563]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1564]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1565]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1566]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1567]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1568]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1569]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1570]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1571]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1572]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1573]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1574]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1575]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1576]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1577]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1578]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1579]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1580]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1581]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1582]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1583]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1584]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1585]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1586]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1587]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1588]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1589]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1590]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1591]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1592]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1593]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1594]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1595]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1596]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1597]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1598]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1599]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1600]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1601]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1602]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1603]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1604]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1605]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1606]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1607]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1608]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1609]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1610]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1611]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1612]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1613]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1614]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1615]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1616]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1617]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1618]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1619]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1620]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1621]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1622]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1623]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1624]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1625]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1626]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1627]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1628]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1629]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1630]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1631]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1632]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1633]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1634]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1635]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1636]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1637]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1638]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1639]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1640]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1641]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1642]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1643]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1644]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1645]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1646]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1647]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1648]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1649]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1650]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1651]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1652]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1653]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1654]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1655]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1656]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1657]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1658]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1659]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1660]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1661]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1662]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1663]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1664]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1665]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1666]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1667]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1668]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1669]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1670]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1671]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1672]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1673]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1674]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1675]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1676]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1677]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1678]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1679]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1680]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1681]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1682]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1683]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1684]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1685]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1686]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1687]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1688]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1689]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1690]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1691]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1692]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1693]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1694]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1695]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1696]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1697]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1698]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1699]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1700]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1701]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1702]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1703]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1704]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1705]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1706]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1707]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1708]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1709]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1710]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1711]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1712]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1713]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1714]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1715]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1716]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1717]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1718]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1719]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1720]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1721]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1722]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1723]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1724]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1725]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1726]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1727]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1728]: 0.876 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1729]: 0.876 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1730]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1731]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1732]: 0.886 (+/-0.245) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1733]: 0.886 (+/-0.245) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1734]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1735]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1736]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1737]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1738]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1739]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1740]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1741]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1742]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1743]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1744]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1745]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1746]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1747]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1748]: 0.905 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1749]: 0.905 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1750]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1751]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1752]: 0.876 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1753]: 0.876 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1754]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1755]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1756]: 0.886 (+/-0.245) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1757]: 0.886 (+/-0.245) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1758]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1759]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1760]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1761]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1762]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1763]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1764]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1765]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1766]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1767]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1768]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1769]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1770]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1771]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1772]: 0.905 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1773]: 0.905 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1774]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1775]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1776]: 0.876 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1777]: 0.876 (+/-0.143) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1778]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1779]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1780]: 0.886 (+/-0.245) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1781]: 0.886 (+/-0.245) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1782]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1783]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1784]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1785]: 0.905 (+/-0.085) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1786]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1787]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1788]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1789]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1790]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1791]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1792]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1793]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1794]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1795]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1796]: 0.905 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1797]: 0.905 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1798]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1799]: 0.695 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1800]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1801]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1802]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1803]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1804]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1805]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1806]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1807]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1808]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1809]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1810]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1811]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1812]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1813]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1814]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1815]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1816]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1817]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1818]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1819]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1820]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1821]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1822]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1823]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1824]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1825]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1826]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1827]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1828]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1829]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1830]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1831]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1832]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1833]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1834]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1835]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1836]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1837]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1838]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1839]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1840]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1841]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1842]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1843]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1844]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1845]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1846]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1847]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1848]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1849]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1850]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1851]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1852]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1853]: 0.952 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1854]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1855]: 0.848 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1856]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1857]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1858]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1859]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1860]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1861]: 0.914 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1862]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1863]: 0.867 (+/-0.126) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1864]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1865]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1866]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1867]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1868]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1869]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1870]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1871]: 0.857 (+/-0.104) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1872]: 0.638 (+/-0.524) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1873]: 0.638 (+/-0.524) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1874]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1875]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1876]: 0.829 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1877]: 0.829 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1878]: 0.895 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1879]: 0.895 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1880]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1881]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1882]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1883]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1884]: 0.876 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1885]: 0.876 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1886]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1887]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1888]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1889]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1890]: 0.914 (+/-0.203) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1891]: 0.914 (+/-0.203) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1892]: 0.800 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1893]: 0.800 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1894]: 0.886 (+/-0.187) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1895]: 0.886 (+/-0.187) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1896]: 0.638 (+/-0.524) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1897]: 0.638 (+/-0.524) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1898]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1899]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1900]: 0.829 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1901]: 0.829 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1902]: 0.895 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1903]: 0.895 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1904]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1905]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1906]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1907]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1908]: 0.876 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1909]: 0.876 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1910]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1911]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1912]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1913]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1914]: 0.914 (+/-0.203) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1915]: 0.914 (+/-0.203) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1916]: 0.800 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1917]: 0.800 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1918]: 0.886 (+/-0.187) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1919]: 0.886 (+/-0.187) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1920]: 0.638 (+/-0.524) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1921]: 0.638 (+/-0.524) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1922]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1923]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1924]: 0.829 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1925]: 0.829 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1926]: 0.895 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1927]: 0.895 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1928]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1929]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1930]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1931]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1932]: 0.876 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1933]: 0.876 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1934]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1935]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1936]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1937]: 0.867 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1938]: 0.914 (+/-0.203) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1939]: 0.914 (+/-0.203) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1940]: 0.800 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1941]: 0.800 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1942]: 0.886 (+/-0.187) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1943]: 0.886 (+/-0.187) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1944]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1945]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1946]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1947]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1948]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1949]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1950]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1951]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1952]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1953]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1954]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1955]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1956]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1957]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1958]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1959]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1960]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1961]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1962]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1963]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1964]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1965]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1966]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1967]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1968]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1969]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1970]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1971]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1972]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1973]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1974]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1975]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1976]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1977]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1978]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1979]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1980]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1981]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1982]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1983]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1984]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1985]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1986]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1987]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1988]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1989]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1990]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1991]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[1992]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[1993]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[1994]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[1995]: 0.848 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[1996]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[1997]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[1998]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[1999]: 0.848 (+/-0.383) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2000]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2001]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2002]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2003]: 0.838 (+/-0.214) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2004]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2005]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2006]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2007]: 0.705 (+/-0.164) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2008]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2009]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2010]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2011]: 0.838 (+/-0.322) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2012]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2013]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2014]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2015]: 0.686 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2016]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2017]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2018]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2019]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2020]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2021]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2022]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2023]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2024]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2025]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2026]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2027]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2028]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2029]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2030]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2031]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2032]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2033]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2034]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2035]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2036]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2037]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2038]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2039]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2040]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2041]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2042]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2043]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2044]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2045]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2046]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2047]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2048]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2049]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2050]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2051]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2052]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2053]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2054]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2055]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2056]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2057]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2058]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2059]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2060]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2061]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2062]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2063]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2064]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2065]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2066]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2067]: 0.657 (+/-0.452) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2068]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2069]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2070]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2071]: 0.543 (+/-0.349) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2072]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2073]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2074]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2075]: 0.771 (+/-0.229) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2076]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2077]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2078]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2079]: 0.848 (+/-0.244) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2080]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2081]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2082]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2083]: 0.562 (+/-0.373) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2084]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2085]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2086]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2087]: 0.686 (+/-0.311) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2088]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2089]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2090]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2091]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2092]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2093]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2094]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2095]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2096]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2097]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2098]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2099]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2100]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2101]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2102]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2103]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2104]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2105]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2106]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2107]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2108]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2109]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2110]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2111]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2112]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2113]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2114]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2115]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2116]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2117]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2118]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2119]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2120]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2121]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2122]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2123]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2124]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2125]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2126]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2127]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2128]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2129]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2130]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2131]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2132]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2133]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2134]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2135]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2136]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2137]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2138]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2139]: 0.838 (+/-0.230) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2140]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2141]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2142]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2143]: 0.848 (+/-0.304) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2144]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2145]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2146]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2147]: 0.648 (+/-0.129) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2148]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2149]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2150]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2151]: 0.724 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2152]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2153]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2154]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2155]: 0.819 (+/-0.258) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2156]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2157]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2158]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2159]: 0.743 (+/-0.260) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2160]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2161]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2162]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2163]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2164]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2165]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2166]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2167]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2168]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2169]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2170]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2171]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2172]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2173]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2174]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2175]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2176]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2177]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2178]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2179]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2180]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2181]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2182]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2183]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2184]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2185]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2186]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2187]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2188]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2189]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2190]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2191]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2192]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2193]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2194]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2195]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2196]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2197]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2198]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2199]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2200]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2201]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2202]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2203]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2204]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2205]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2206]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2207]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2208]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2209]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2210]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2211]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2212]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2213]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2214]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2215]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2216]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2217]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2218]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2219]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2220]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2221]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2222]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2223]: 0.857 (+/-0.148) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2224]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2225]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2226]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2227]: 0.981 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2228]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2229]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2230]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2231]: 0.914 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2232]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2233]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2234]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2235]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2236]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2237]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2238]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2239]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2240]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2241]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2242]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2243]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2244]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2245]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2246]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2247]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2248]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2249]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2250]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2251]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2252]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2253]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2254]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2255]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2256]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2257]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2258]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2259]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2260]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2261]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2262]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2263]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2264]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2265]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2266]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2267]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2268]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2269]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2270]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2271]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2272]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2273]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2274]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2275]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2276]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2277]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2278]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2279]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2280]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2281]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2282]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2283]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2284]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2285]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2286]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2287]: 0.943 (+/-0.140) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2288]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2289]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2290]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2291]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2292]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2293]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2294]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2295]: 0.924 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2296]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2297]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2298]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2299]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2300]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2301]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2302]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2303]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2304]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2305]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2306]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2307]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2308]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2309]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2310]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2311]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2312]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2313]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2314]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2315]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2316]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2317]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2318]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2319]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2320]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2321]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2322]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2323]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2324]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2325]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2326]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2327]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2328]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2329]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2330]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2331]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2332]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2333]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2334]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2335]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2336]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2337]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2338]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2339]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2340]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2341]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2342]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2343]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2344]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2345]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2346]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2347]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2348]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2349]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2350]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2351]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2352]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2353]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2354]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2355]: 0.962 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2356]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2357]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2358]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2359]: 0.933 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2360]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2361]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2362]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2363]: 0.914 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2364]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2365]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2366]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2367]: 0.886 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2368]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2369]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2370]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2371]: 0.990 (+/-0.038) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2372]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2373]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2374]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2375]: 0.905 (+/-0.135) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2376]: 0.886 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2377]: 0.886 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2378]: 0.790 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2379]: 0.790 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2380]: 0.857 (+/-0.200) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2381]: 0.857 (+/-0.200) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2382]: 0.781 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2383]: 0.781 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2384]: 0.829 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2385]: 0.829 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2386]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2387]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2388]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2389]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2390]: 0.895 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2391]: 0.895 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2392]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2393]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2394]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2395]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2396]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2397]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2398]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2399]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2400]: 0.886 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2401]: 0.886 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2402]: 0.790 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2403]: 0.790 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2404]: 0.857 (+/-0.200) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2405]: 0.857 (+/-0.200) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2406]: 0.781 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2407]: 0.781 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2408]: 0.829 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2409]: 0.829 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2410]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2411]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2412]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2413]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2414]: 0.895 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2415]: 0.895 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2416]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2417]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2418]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2419]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2420]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2421]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2422]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2423]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2424]: 0.886 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2425]: 0.886 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2426]: 0.790 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2427]: 0.790 (+/-0.166) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2428]: 0.857 (+/-0.200) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2429]: 0.857 (+/-0.200) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2430]: 0.781 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2431]: 0.781 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2432]: 0.829 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2433]: 0.829 (+/-0.155) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2434]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2435]: 0.886 (+/-0.097) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2436]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2437]: 0.838 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2438]: 0.895 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2439]: 0.895 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2440]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2441]: 0.867 (+/-0.212) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2442]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2443]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2444]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2445]: 0.924 (+/-0.076) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2446]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2447]: 0.876 (+/-0.177) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2448]: 0.610 (+/-0.607) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2449]: 0.610 (+/-0.607) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2450]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2451]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2452]: 0.695 (+/-0.333) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2453]: 0.695 (+/-0.333) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2454]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2455]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2456]: 0.829 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2457]: 0.829 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2458]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2459]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2460]: 0.781 (+/-0.267) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2461]: 0.781 (+/-0.267) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2462]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2463]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2464]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2465]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2466]: 0.838 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2467]: 0.838 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2468]: 0.667 (+/-0.455) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2469]: 0.667 (+/-0.455) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2470]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2471]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2472]: 0.610 (+/-0.607) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2473]: 0.610 (+/-0.607) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2474]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2475]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2476]: 0.695 (+/-0.333) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2477]: 0.695 (+/-0.333) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2478]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2479]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2480]: 0.829 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2481]: 0.829 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2482]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2483]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2484]: 0.781 (+/-0.267) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2485]: 0.781 (+/-0.267) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2486]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2487]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2488]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2489]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2490]: 0.838 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2491]: 0.838 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2492]: 0.667 (+/-0.455) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2493]: 0.667 (+/-0.455) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2494]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2495]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2496]: 0.610 (+/-0.607) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2497]: 0.610 (+/-0.607) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2498]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2499]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2500]: 0.695 (+/-0.333) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2501]: 0.695 (+/-0.333) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2502]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2503]: 0.867 (+/-0.152) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2504]: 0.829 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2505]: 0.829 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2506]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2507]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2508]: 0.781 (+/-0.267) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2509]: 0.781 (+/-0.267) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2510]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2511]: 0.857 (+/-0.170) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2512]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2513]: 0.876 (+/-0.222) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2514]: 0.838 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2515]: 0.838 (+/-0.196) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2516]: 0.667 (+/-0.455) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2517]: 0.667 (+/-0.455) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2518]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2519]: 0.848 (+/-0.185) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2520]: 0.657 (+/-0.456) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2521]: 0.657 (+/-0.456) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2522]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2523]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2524]: 0.838 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2525]: 0.838 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2526]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2527]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2528]: 0.800 (+/-0.378) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2529]: 0.800 (+/-0.378) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2530]: 0.971 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2531]: 0.971 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2532]: 0.724 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2533]: 0.724 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2534]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2535]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2536]: 0.848 (+/-0.220) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2537]: 0.848 (+/-0.220) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2538]: 0.952 (+/-0.060) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2539]: 0.952 (+/-0.060) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2540]: 0.848 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2541]: 0.848 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2542]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2543]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2544]: 0.657 (+/-0.456) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2545]: 0.657 (+/-0.456) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2546]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2547]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2548]: 0.838 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2549]: 0.838 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2550]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2551]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2552]: 0.800 (+/-0.378) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2553]: 0.800 (+/-0.378) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2554]: 0.971 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2555]: 0.971 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2556]: 0.724 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2557]: 0.724 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2558]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2559]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2560]: 0.848 (+/-0.220) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2561]: 0.848 (+/-0.220) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2562]: 0.952 (+/-0.060) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2563]: 0.952 (+/-0.060) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2564]: 0.848 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2565]: 0.848 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2566]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2567]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2568]: 0.657 (+/-0.456) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2569]: 0.657 (+/-0.456) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2570]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2571]: 0.943 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2572]: 0.838 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2573]: 0.838 (+/-0.205) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2574]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2575]: 0.895 (+/-0.111) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2576]: 0.800 (+/-0.378) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2577]: 0.800 (+/-0.378) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2578]: 0.971 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2579]: 0.971 (+/-0.047) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2580]: 0.724 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2581]: 0.724 (+/-0.093) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2582]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2583]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2584]: 0.848 (+/-0.220) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2585]: 0.848 (+/-0.220) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2586]: 0.952 (+/-0.060) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2587]: 0.952 (+/-0.060) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2588]: 0.848 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2589]: 0.848 (+/-0.194) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2590]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2591]: 0.962 (+/-0.071) for {'alpha': 0.001, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2592]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2593]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2594]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2595]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2596]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2597]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2598]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2599]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2600]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2601]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2602]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2603]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2604]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2605]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2606]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2607]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2608]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2609]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2610]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2611]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2612]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2613]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2614]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2615]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2616]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2617]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2618]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2619]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2620]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2621]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2622]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2623]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2624]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2625]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2626]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2627]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2628]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2629]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2630]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2631]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2632]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2633]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2634]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2635]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2636]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2637]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2638]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2639]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2640]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2641]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2642]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2643]: 0.867 (+/-0.212) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2644]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2645]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2646]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2647]: 0.838 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2648]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2649]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2650]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2651]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2652]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2653]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2654]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2655]: 0.914 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2656]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2657]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2658]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2659]: 0.924 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2660]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2661]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2662]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2663]: 0.895 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2664]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2665]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2666]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2667]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2668]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2669]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2670]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2671]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2672]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2673]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2674]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2675]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2676]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2677]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2678]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2679]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2680]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2681]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2682]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2683]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2684]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2685]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2686]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2687]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2688]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2689]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2690]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2691]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2692]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2693]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2694]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2695]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2696]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2697]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2698]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2699]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2700]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2701]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2702]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2703]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2704]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2705]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2706]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2707]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2708]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2709]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2710]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2711]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2712]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2713]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2714]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2715]: 0.781 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2716]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2717]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2718]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2719]: 0.714 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2720]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2721]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2722]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2723]: 0.886 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2724]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2725]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2726]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2727]: 0.829 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2728]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2729]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2730]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2731]: 0.810 (+/-0.295) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2732]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2733]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2734]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2735]: 0.800 (+/-0.279) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2736]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2737]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2738]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2739]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2740]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2741]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2742]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2743]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2744]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2745]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2746]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2747]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2748]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2749]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2750]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2751]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2752]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2753]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2754]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2755]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2756]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2757]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2758]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2759]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2760]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2761]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2762]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2763]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2764]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2765]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2766]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2767]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2768]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2769]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2770]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2771]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2772]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2773]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2774]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2775]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2776]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2777]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2778]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2779]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2780]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2781]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2782]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2783]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2784]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2785]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2786]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2787]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2788]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2789]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2790]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2791]: 0.848 (+/-0.304) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2792]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2793]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2794]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2795]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2796]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2797]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2798]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2799]: 0.724 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2800]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2801]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2802]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2803]: 0.495 (+/-0.344) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2804]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2805]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2806]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2807]: 0.743 (+/-0.260) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2808]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2809]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2810]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2811]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2812]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2813]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2814]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2815]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2816]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2817]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2818]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2819]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2820]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2821]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2822]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2823]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2824]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2825]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2826]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2827]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2828]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2829]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2830]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2831]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2832]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2833]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2834]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2835]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2836]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2837]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2838]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2839]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2840]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2841]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2842]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2843]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2844]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2845]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2846]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2847]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2848]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2849]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2850]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2851]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2852]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2853]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2854]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2855]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2856]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2857]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2858]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2859]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2860]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2861]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2862]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2863]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2864]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2865]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2866]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2867]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2868]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2869]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2870]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2871]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2872]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2873]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2874]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2875]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2876]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2877]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2878]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2879]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2880]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2881]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2882]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2883]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2884]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2885]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2886]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2887]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2888]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2889]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2890]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2891]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2892]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2893]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2894]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2895]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2896]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2897]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2898]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2899]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2900]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2901]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2902]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2903]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2904]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2905]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2906]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2907]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2908]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2909]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2910]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2911]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2912]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2913]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2914]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2915]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2916]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2917]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2918]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2919]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2920]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2921]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2922]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2923]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2924]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2925]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2926]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2927]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2928]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2929]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2930]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2931]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2932]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2933]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2934]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2935]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2936]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2937]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2938]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2939]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2940]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2941]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2942]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2943]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2944]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2945]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2946]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2947]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2948]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2949]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2950]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2951]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2952]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2953]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2954]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2955]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2956]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2957]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2958]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2959]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2960]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2961]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2962]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2963]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2964]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2965]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2966]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2967]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2968]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2969]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2970]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2971]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2972]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2973]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2974]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2975]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2976]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2977]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2978]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2979]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2980]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2981]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2982]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2983]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2984]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2985]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2986]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2987]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2988]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2989]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2990]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2991]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[2992]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[2993]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[2994]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[2995]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[2996]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[2997]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[2998]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[2999]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3000]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3001]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3002]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3003]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3004]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3005]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3006]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3007]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3008]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3009]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3010]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3011]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3012]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3013]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3014]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3015]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3016]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3017]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3018]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3019]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3020]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3021]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3022]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3023]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3024]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3025]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3026]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3027]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3028]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3029]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3030]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3031]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3032]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3033]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3034]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3035]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3036]: 0.876 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3037]: 0.876 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3038]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3039]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3040]: 0.905 (+/-0.135) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3041]: 0.905 (+/-0.135) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3042]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3043]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3044]: 0.829 (+/-0.166) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3045]: 0.829 (+/-0.166) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3046]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3047]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3048]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3049]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3050]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3051]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3052]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3053]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3054]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3055]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3056]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3057]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3058]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3059]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3060]: 0.876 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3061]: 0.876 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3062]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3063]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3064]: 0.905 (+/-0.135) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3065]: 0.905 (+/-0.135) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3066]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3067]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3068]: 0.829 (+/-0.166) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3069]: 0.829 (+/-0.166) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3070]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3071]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3072]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3073]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3074]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3075]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3076]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3077]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3078]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3079]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3080]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3081]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3082]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3083]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3084]: 0.876 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3085]: 0.876 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3086]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3087]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3088]: 0.905 (+/-0.135) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3089]: 0.905 (+/-0.135) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3090]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3091]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3092]: 0.829 (+/-0.166) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3093]: 0.829 (+/-0.166) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3094]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3095]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3096]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3097]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3098]: 0.819 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3099]: 0.819 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3100]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3101]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3102]: 0.838 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3103]: 0.838 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3104]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3105]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3106]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3107]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3108]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3109]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3110]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3111]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3112]: 0.867 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3113]: 0.867 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3114]: 0.848 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3115]: 0.848 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3116]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3117]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3118]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3119]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3120]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3121]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3122]: 0.819 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3123]: 0.819 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3124]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3125]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3126]: 0.838 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3127]: 0.838 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3128]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3129]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3130]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3131]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3132]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3133]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3134]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3135]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3136]: 0.867 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3137]: 0.867 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3138]: 0.848 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3139]: 0.848 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3140]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3141]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3142]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3143]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3144]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3145]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3146]: 0.819 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3147]: 0.819 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3148]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3149]: 0.943 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3150]: 0.838 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3151]: 0.838 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3152]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3153]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3154]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3155]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3156]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3157]: 0.924 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3158]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3159]: 0.867 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3160]: 0.867 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3161]: 0.867 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3162]: 0.848 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3163]: 0.848 (+/-0.093) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3164]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3165]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3166]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3167]: 0.857 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3168]: 0.657 (+/-0.523) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3169]: 0.657 (+/-0.523) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3170]: 0.895 (+/-0.229) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3171]: 0.895 (+/-0.229) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3172]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3173]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3174]: 0.905 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3175]: 0.905 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3176]: 0.724 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3177]: 0.724 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3178]: 0.943 (+/-0.071) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3179]: 0.943 (+/-0.071) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3180]: 0.733 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3181]: 0.733 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3182]: 0.924 (+/-0.114) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3183]: 0.924 (+/-0.114) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3184]: 0.733 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3185]: 0.733 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3186]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3187]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3188]: 0.781 (+/-0.280) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3189]: 0.781 (+/-0.280) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3190]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3191]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3192]: 0.657 (+/-0.523) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3193]: 0.657 (+/-0.523) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3194]: 0.895 (+/-0.229) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3195]: 0.895 (+/-0.229) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3196]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3197]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3198]: 0.905 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3199]: 0.905 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3200]: 0.724 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3201]: 0.724 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3202]: 0.943 (+/-0.071) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3203]: 0.943 (+/-0.071) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3204]: 0.733 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3205]: 0.733 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3206]: 0.924 (+/-0.114) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3207]: 0.924 (+/-0.114) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3208]: 0.733 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3209]: 0.733 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3210]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3211]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3212]: 0.781 (+/-0.280) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3213]: 0.781 (+/-0.280) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3214]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3215]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3216]: 0.657 (+/-0.523) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3217]: 0.657 (+/-0.523) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3218]: 0.895 (+/-0.229) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3219]: 0.895 (+/-0.229) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3220]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3221]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3222]: 0.905 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3223]: 0.905 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3224]: 0.724 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3225]: 0.724 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3226]: 0.943 (+/-0.071) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3227]: 0.943 (+/-0.071) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3228]: 0.733 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3229]: 0.733 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3230]: 0.924 (+/-0.114) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3231]: 0.924 (+/-0.114) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3232]: 0.733 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3233]: 0.733 (+/-0.245) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3234]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3235]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3236]: 0.781 (+/-0.280) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3237]: 0.781 (+/-0.280) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3238]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3239]: 0.905 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.01, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3240]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3241]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3242]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3243]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3244]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3245]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3246]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3247]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3248]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3249]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3250]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3251]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3252]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3253]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3254]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3255]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3256]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3257]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3258]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3259]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3260]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3261]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3262]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3263]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3264]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3265]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3266]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3267]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3268]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3269]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3270]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3271]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3272]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3273]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3274]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3275]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3276]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3277]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3278]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3279]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3280]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3281]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3282]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3283]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3284]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3285]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3286]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3287]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3288]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3289]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3290]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3291]: 0.838 (+/-0.273) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3292]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3293]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3294]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3295]: 0.743 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3296]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3297]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3298]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3299]: 0.695 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3300]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3301]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3302]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3303]: 0.714 (+/-0.060) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3304]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3305]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3306]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3307]: 0.733 (+/-0.311) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3308]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3309]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3310]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3311]: 0.724 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3312]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3313]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3314]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3315]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3316]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3317]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3318]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3319]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3320]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3321]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3322]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3323]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3324]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3325]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3326]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3327]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3328]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3329]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3330]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3331]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3332]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3333]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3334]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3335]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3336]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3337]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3338]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3339]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3340]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3341]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3342]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3343]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3344]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3345]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3346]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3347]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3348]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3349]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3350]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3351]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3352]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3353]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3354]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3355]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3356]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3357]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3358]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3359]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3360]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3361]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3362]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3363]: 0.533 (+/-0.505) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3364]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3365]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3366]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3367]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3368]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3369]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3370]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3371]: 0.752 (+/-0.348) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3372]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3373]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3374]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3375]: 0.571 (+/-0.335) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3376]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3377]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3378]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3379]: 0.848 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3380]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3381]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3382]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3383]: 0.648 (+/-0.328) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3384]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3385]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3386]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3387]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3388]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3389]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3390]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3391]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3392]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3393]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3394]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3395]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3396]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3397]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3398]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3399]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3400]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3401]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3402]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3403]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3404]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3405]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3406]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3407]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3408]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3409]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3410]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3411]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3412]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3413]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3414]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3415]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3416]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3417]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3418]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3419]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3420]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3421]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3422]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3423]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3424]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3425]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3426]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3427]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3428]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3429]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3430]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3431]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3432]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3433]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3434]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3435]: 0.581 (+/-0.480) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3436]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3437]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3438]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3439]: 0.733 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3440]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3441]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3442]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3443]: 0.752 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3444]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3445]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3446]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3447]: 0.638 (+/-0.267) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3448]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3449]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3450]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3451]: 0.514 (+/-0.406) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3452]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3453]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3454]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3455]: 0.600 (+/-0.492) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'constant', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3456]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3457]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3458]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3459]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3460]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3461]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3462]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3463]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3464]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3465]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3466]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3467]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3468]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3469]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3470]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3471]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3472]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3473]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3474]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3475]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3476]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3477]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3478]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3479]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3480]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3481]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3482]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3483]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3484]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3485]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3486]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3487]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3488]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3489]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3490]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3491]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3492]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3493]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3494]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3495]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3496]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3497]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3498]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3499]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3500]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3501]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3502]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3503]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3504]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3505]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3506]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3507]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3508]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3509]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3510]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3511]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3512]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3513]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3514]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3515]: 0.962 (+/-0.111) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3516]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3517]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3518]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3519]: 0.848 (+/-0.185) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3520]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3521]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3522]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3523]: 0.981 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3524]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3525]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3526]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3527]: 0.933 (+/-0.097) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3528]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3529]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3530]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3531]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3532]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3533]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3534]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3535]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3536]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3537]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3538]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3539]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3540]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3541]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3542]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3543]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3544]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3545]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3546]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3547]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3548]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3549]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3550]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3551]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3552]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3553]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3554]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3555]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3556]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3557]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3558]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3559]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3560]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3561]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3562]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3563]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3564]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3565]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3566]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3567]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3568]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3569]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3570]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3571]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3572]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3573]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3574]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3575]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3576]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3577]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3578]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3579]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3580]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3581]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3582]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3583]: 0.819 (+/-0.194) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3584]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3585]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3586]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3587]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3588]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3589]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3590]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3591]: 0.933 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3592]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3593]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3594]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3595]: 0.990 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3596]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3597]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3598]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3599]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3600]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3601]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3602]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3603]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3604]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3605]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3606]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3607]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3608]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3609]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3610]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3611]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3612]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3613]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3614]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3615]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3616]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3617]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3618]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3619]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3620]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3621]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3622]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3623]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3624]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3625]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3626]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3627]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3628]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3629]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3630]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3631]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3632]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3633]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3634]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3635]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3636]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3637]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3638]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3639]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3640]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3641]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3642]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3643]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3644]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3645]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3646]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3647]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3648]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3649]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3650]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3651]: 0.962 (+/-0.038) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3652]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3653]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3654]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3655]: 0.876 (+/-0.230) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3656]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3657]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3658]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3659]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3660]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3661]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3662]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3663]: 0.895 (+/-0.203) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3664]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3665]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3666]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3667]: 0.971 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3668]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3669]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3670]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3671]: 0.943 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'optimal', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3672]: 0.800 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3673]: 0.800 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3674]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3675]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3676]: 0.857 (+/-0.341) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3677]: 0.857 (+/-0.341) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3678]: 0.810 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3679]: 0.810 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3680]: 0.848 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3681]: 0.848 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3682]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3683]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3684]: 0.848 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3685]: 0.848 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3686]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3687]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3688]: 0.895 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3689]: 0.895 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3690]: 0.886 (+/-0.143) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3691]: 0.886 (+/-0.143) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3692]: 0.771 (+/-0.251) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3693]: 0.771 (+/-0.251) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3694]: 0.857 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3695]: 0.857 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3696]: 0.800 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3697]: 0.800 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3698]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3699]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3700]: 0.857 (+/-0.341) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3701]: 0.857 (+/-0.341) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3702]: 0.810 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3703]: 0.810 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3704]: 0.848 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3705]: 0.848 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3706]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3707]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3708]: 0.848 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3709]: 0.848 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3710]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3711]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3712]: 0.895 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3713]: 0.895 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3714]: 0.886 (+/-0.143) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3715]: 0.886 (+/-0.143) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3716]: 0.771 (+/-0.251) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3717]: 0.771 (+/-0.251) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3718]: 0.857 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3719]: 0.857 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3720]: 0.800 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3721]: 0.800 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3722]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3723]: 0.781 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3724]: 0.857 (+/-0.341) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3725]: 0.857 (+/-0.341) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3726]: 0.810 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3727]: 0.810 (+/-0.120) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3728]: 0.848 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3729]: 0.848 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3730]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3731]: 0.867 (+/-0.164) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3732]: 0.848 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3733]: 0.848 (+/-0.236) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3734]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3735]: 0.886 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3736]: 0.895 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3737]: 0.895 (+/-0.140) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3738]: 0.886 (+/-0.143) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3739]: 0.886 (+/-0.143) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3740]: 0.771 (+/-0.251) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3741]: 0.771 (+/-0.251) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3742]: 0.857 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3743]: 0.857 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3744]: 0.705 (+/-0.612) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3745]: 0.705 (+/-0.612) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3746]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3747]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3748]: 0.467 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3749]: 0.467 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3750]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3751]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3752]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3753]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3754]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3755]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3756]: 0.819 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3757]: 0.819 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3758]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3759]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3760]: 0.743 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3761]: 0.743 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3762]: 0.838 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3763]: 0.838 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3764]: 0.724 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3765]: 0.724 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3766]: 0.838 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3767]: 0.838 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3768]: 0.705 (+/-0.612) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3769]: 0.705 (+/-0.612) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3770]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3771]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3772]: 0.467 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3773]: 0.467 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3774]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3775]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3776]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3777]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3778]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3779]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3780]: 0.819 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3781]: 0.819 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3782]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3783]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3784]: 0.743 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3785]: 0.743 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3786]: 0.838 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3787]: 0.838 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3788]: 0.724 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3789]: 0.724 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3790]: 0.829 (+/-0.222) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3791]: 0.829 (+/-0.222) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3792]: 0.705 (+/-0.612) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3793]: 0.705 (+/-0.612) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3794]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3795]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3796]: 0.467 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3797]: 0.467 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3798]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3799]: 0.867 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3800]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3801]: 0.810 (+/-0.209) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3802]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3803]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3804]: 0.819 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3805]: 0.819 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3806]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3807]: 0.857 (+/-0.170) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3808]: 0.743 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3809]: 0.743 (+/-0.286) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3810]: 0.838 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3811]: 0.838 (+/-0.196) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3812]: 0.724 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3813]: 0.724 (+/-0.152) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3814]: 0.848 (+/-0.229) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3815]: 0.848 (+/-0.229) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'squared_hinge', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3816]: 0.733 (+/-0.364) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3817]: 0.733 (+/-0.364) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3818]: 0.876 (+/-0.205) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3819]: 0.876 (+/-0.205) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3820]: 0.667 (+/-0.439) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3821]: 0.667 (+/-0.439) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3822]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3823]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3824]: 0.781 (+/-0.222) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3825]: 0.781 (+/-0.222) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3826]: 0.886 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3827]: 0.886 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3828]: 0.714 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3829]: 0.714 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3830]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3831]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3832]: 0.724 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3833]: 0.724 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3834]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3835]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3836]: 0.552 (+/-0.299) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3837]: 0.552 (+/-0.299) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3838]: 0.933 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3839]: 0.933 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 300, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3840]: 0.733 (+/-0.364) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3841]: 0.733 (+/-0.364) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3842]: 0.876 (+/-0.205) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3843]: 0.876 (+/-0.205) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3844]: 0.667 (+/-0.439) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3845]: 0.667 (+/-0.439) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3846]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3847]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3848]: 0.781 (+/-0.222) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3849]: 0.781 (+/-0.222) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3850]: 0.886 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3851]: 0.886 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3852]: 0.714 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3853]: 0.714 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3854]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3855]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3856]: 0.724 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3857]: 0.724 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3858]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3859]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3860]: 0.552 (+/-0.299) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3861]: 0.552 (+/-0.299) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3862]: 0.933 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3863]: 0.933 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 900, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3864]: 0.733 (+/-0.364) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3865]: 0.733 (+/-0.364) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3866]: 0.876 (+/-0.205) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3867]: 0.876 (+/-0.205) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3868]: 0.667 (+/-0.439) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3869]: 0.667 (+/-0.439) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3870]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3871]: 0.914 (+/-0.126) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 5, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3872]: 0.781 (+/-0.222) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3873]: 0.781 (+/-0.222) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3874]: 0.886 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3875]: 0.886 (+/-0.129) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3876]: 0.714 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3877]: 0.714 (+/-0.159) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3878]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3879]: 0.952 (+/-0.104) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 10, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
[3880]: 0.724 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.1}
[3881]: 0.724 (+/-0.298) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.1, 'validation_fraction': 0.2}
[3882]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.1}
[3883]: 0.971 (+/-0.076) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l1', 'power_t': 0.5, 'validation_fraction': 0.2}
[3884]: 0.552 (+/-0.299) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.1}
[3885]: 0.552 (+/-0.299) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.1, 'validation_fraction': 0.2}
[3886]: 0.933 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.1}
[3887]: 0.933 (+/-0.047) for {'alpha': 0.01, 'early_stopping': False, 'eta0': 0.1, 'learning_rate': 'invscaling', 'loss': 'perceptron', 'max_iter': 1400, 'n_iter_no_change': 20, 'penalty': 'l2', 'power_t': 0.5, 'validation_fraction': 0.2}
Detailed classification report:
The model is trained on the full development set.
The scores are computed on the full evaluation set.
precision recall f1-score support
0 1.00 1.00 1.00 16
1 1.00 0.89 0.94 18
2 0.85 1.00 0.92 11
accuracy 0.96 45
macro avg 0.95 0.96 0.95 45
weighted avg 0.96 0.96 0.96 45
CTOR for best model: SGDClassifier(alpha=0.001, eta0=0.01, loss='perceptron', max_iter=300,
n_iter_no_change=20, penalty='l1', power_t=0.1, random_state=42)
best: dat=iris, score=0.99048, model=SGDClassifier(alpha=0.001,early_stopping=False,eta0=0.01,learning_rate='optimal',loss='perceptron',max_iter=300,n_iter_no_change=20,penalty='l1',power_t=0.1,validation_fraction=0.1)
OK(grid-search)
Qc Hyperparameter Random Search using an SDG classifier¶
Description
Now, add code to run a `RandomizedSearchCV` instead.
Use these default parameters for the random search, similar to the default parameters for the grid search
random_tuned = RandomizedSearchCV(
model,
tuning_parameters,
n_iter=20,
random_state=42,
cv=CV,
scoring='f1_micro',
verbose=VERBOSE,
n_jobs=-1
)
but with the two new parameters, n_iter and random_state, added. Since the search type is now random, the random_state parameter now makes sense; but essential to random search is the new n_iter parameter.
So: investigate the n_iter parameter...in code and write a conceptual explanation in text.
Comparison of time (seconds) to complete GridSearch versus RandomizedSearchCV, does not necessarily give any sense, if your grid search completes in a few seconds (as for the iris tiny-data). You need a search that runs for minutes, hours, or days.
But you could compare the best-tuned parameter set and best scoring for the two methods. Is the random search best model close to the grid search?
We have now replaced the GridSearchCV with RandomizedSearchCV. The n_iter parameter sets the number of hyperparameter combinations that are sampled at random from the search space (so the total number of fits is n_iter × cv folds). Unlike grid search, which tries every combination, random search only evaluates n_iter of them — this caps the search time, trading exhaustiveness for speed.
We trained this with a lot of hyperparameters, and it still did not take long to train. However, when we tried to add one more hyperparameter, the grid search took so long that we gave up.
The results we obtained for GridSearch are as follows:
best: dat=iris, score=0.99048, model=SGDClassifier(alpha=0.001,early_stopping=False,eta0=0.01,learning_rate='optimal',loss='perceptron',max_iter=300,n_iter_no_change=20,penalty='l1',power_t=0.1,validation_fraction=0.1)
SEARCH TIME: 11.04 sec
And for RandomizedSearch:
SEARCH TIME: 0.08 sec
best: dat=iris, score=0.96190, model=SGDClassifier(alpha=0.01,early_stopping=False,eta0=0.1,learning_rate='optimal',loss='hinge',max_iter=1400,n_iter_no_change=5,penalty='l1',power_t=0.5,validation_fraction=0.2)
What we can observe is that GridSearch definitely gives a better result than RandomizedSearch. Each search found a different best model with different parameter settings. There is a long way from 0.96 to 0.99, so the random search's best model is not close to the grid search's.
from sklearn.model_selection import RandomizedSearchCV

# Load and split the dataset; the argument selects 'iris', 'moon', or 'mnist'.
X_train, X_test, y_train, y_test = LoadAndSetupData('iris')

# Baseline estimator — each tunable setting below overrides this during search.
model = SGDClassifier(loss='hinge', random_state=42)

# Hyperparameter space the randomized search samples from.
tuning_parameters = {
    'loss':                ['hinge', 'squared_hinge', 'perceptron'],
    'max_iter':            [300, 900, 1400],
    'alpha':               [0.0001, 0.001, 0.01],
    'eta0':                [0.01, 0.1],
    'penalty':             ['l1', 'l2'],
    'learning_rate':       ['constant', 'optimal', 'invscaling'],
    'power_t':             [0.1, 0.5],
    'early_stopping':      [False],
    'validation_fraction': [0.1, 0.2],
    'n_iter_no_change':    [5, 10, 20],
}

CV = 5        # number of cross-validation folds per sampled combination
VERBOSE = 0   # 0 = silent; raise to see per-fit progress

# Sample 20 parameter combinations (seeded for reproducibility) and score
# each one with CV-fold cross-validation, using every available core.
grid_tuned = RandomizedSearchCV(
    model,
    tuning_parameters,
    n_iter=20,
    random_state=42,
    cv=CV,
    scoring='f1_micro',
    verbose=VERBOSE,
    n_jobs=-1,
)

# Run the search and measure wall-clock time.
start = time()
grid_tuned.fit(X_train, y_train)
t = time() - start

# Report the best model's performance on the held-out test set.
b0, m0 = FullReport(grid_tuned, X_test, y_test, t)
print('OK(grid-search)')
DATA: iris..
org. data: X.shape =( 150; 4), y.shape =( 150)
train data: X_train.shape=( 105; 4), y_train.shape=( 105)
test data: X_test.shape =( 45; 4), y_test.shape =( 45)
SEARCH TIME: 0.06 sec
Best model set found on train set:
best parameters={'validation_fraction': 0.2, 'power_t': 0.5, 'penalty': 'l1', 'n_iter_no_change': 5, 'max_iter': 1400, 'loss': 'hinge', 'learning_rate': 'optimal', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.01}
best 'f1_micro' score=0.9619047619047618
best index=1
Best estimator CTOR:
SGDClassifier(alpha=0.01, eta0=0.1, max_iter=1400, penalty='l1',
random_state=42, validation_fraction=0.2)
Grid scores ('f1_micro') on development set:
[ 0]: 0.905 (+/-0.104) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l2', 'n_iter_no_change': 5, 'max_iter': 300, 'loss': 'perceptron', 'learning_rate': 'invscaling', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.01}
[ 1]: 0.962 (+/-0.038) for {'validation_fraction': 0.2, 'power_t': 0.5, 'penalty': 'l1', 'n_iter_no_change': 5, 'max_iter': 1400, 'loss': 'hinge', 'learning_rate': 'optimal', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.01}
[ 2]: 0.733 (+/-0.322) for {'validation_fraction': 0.1, 'power_t': 0.1, 'penalty': 'l2', 'n_iter_no_change': 20, 'max_iter': 1400, 'loss': 'perceptron', 'learning_rate': 'constant', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.0001}
[ 3]: 0.962 (+/-0.071) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l2', 'n_iter_no_change': 20, 'max_iter': 1400, 'loss': 'perceptron', 'learning_rate': 'invscaling', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.0001}
[ 4]: 0.810 (+/-0.120) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l1', 'n_iter_no_change': 5, 'max_iter': 1400, 'loss': 'hinge', 'learning_rate': 'invscaling', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.0001}
[ 5]: 0.895 (+/-0.071) for {'validation_fraction': 0.2, 'power_t': 0.5, 'penalty': 'l2', 'n_iter_no_change': 10, 'max_iter': 300, 'loss': 'hinge', 'learning_rate': 'invscaling', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.0001}
[ 6]: 0.467 (+/-0.298) for {'validation_fraction': 0.1, 'power_t': 0.1, 'penalty': 'l2', 'n_iter_no_change': 5, 'max_iter': 900, 'loss': 'squared_hinge', 'learning_rate': 'invscaling', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.01}
[ 7]: 0.829 (+/-0.166) for {'validation_fraction': 0.1, 'power_t': 0.1, 'penalty': 'l2', 'n_iter_no_change': 20, 'max_iter': 1400, 'loss': 'hinge', 'learning_rate': 'invscaling', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.01}
[ 8]: 0.943 (+/-0.140) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l2', 'n_iter_no_change': 5, 'max_iter': 1400, 'loss': 'squared_hinge', 'learning_rate': 'optimal', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.001}
[ 9]: 0.962 (+/-0.038) for {'validation_fraction': 0.2, 'power_t': 0.1, 'penalty': 'l1', 'n_iter_no_change': 10, 'max_iter': 300, 'loss': 'hinge', 'learning_rate': 'optimal', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.001}
[10]: 0.695 (+/-0.047) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l1', 'n_iter_no_change': 10, 'max_iter': 900, 'loss': 'hinge', 'learning_rate': 'invscaling', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.0001}
[11]: 0.943 (+/-0.093) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l2', 'n_iter_no_change': 10, 'max_iter': 300, 'loss': 'perceptron', 'learning_rate': 'invscaling', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.0001}
[12]: 0.952 (+/-0.060) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l1', 'n_iter_no_change': 20, 'max_iter': 900, 'loss': 'squared_hinge', 'learning_rate': 'optimal', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.0001}
[13]: 0.848 (+/-0.236) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l1', 'n_iter_no_change': 20, 'max_iter': 900, 'loss': 'perceptron', 'learning_rate': 'constant', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.001}
[14]: 0.743 (+/-0.260) for {'validation_fraction': 0.2, 'power_t': 0.5, 'penalty': 'l2', 'n_iter_no_change': 20, 'max_iter': 900, 'loss': 'perceptron', 'learning_rate': 'constant', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.001}
[15]: 0.638 (+/-0.267) for {'validation_fraction': 0.1, 'power_t': 0.1, 'penalty': 'l2', 'n_iter_no_change': 10, 'max_iter': 1400, 'loss': 'perceptron', 'learning_rate': 'constant', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.01}
[16]: 0.895 (+/-0.229) for {'validation_fraction': 0.2, 'power_t': 0.5, 'penalty': 'l1', 'n_iter_no_change': 5, 'max_iter': 300, 'loss': 'perceptron', 'learning_rate': 'invscaling', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.01}
[17]: 0.933 (+/-0.129) for {'validation_fraction': 0.2, 'power_t': 0.5, 'penalty': 'l2', 'n_iter_no_change': 10, 'max_iter': 900, 'loss': 'squared_hinge', 'learning_rate': 'optimal', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.01}
[18]: 0.886 (+/-0.076) for {'validation_fraction': 0.2, 'power_t': 0.5, 'penalty': 'l2', 'n_iter_no_change': 10, 'max_iter': 1400, 'loss': 'hinge', 'learning_rate': 'invscaling', 'eta0': 0.1, 'early_stopping': False, 'alpha': 0.01}
[19]: 0.848 (+/-0.185) for {'validation_fraction': 0.1, 'power_t': 0.5, 'penalty': 'l1', 'n_iter_no_change': 10, 'max_iter': 1400, 'loss': 'squared_hinge', 'learning_rate': 'constant', 'eta0': 0.01, 'early_stopping': False, 'alpha': 0.0001}
Detailed classification report:
The model is trained on the full development set.
The scores are computed on the full evaluation set.
precision recall f1-score support
0 1.00 1.00 1.00 16
1 1.00 0.83 0.91 18
2 0.79 1.00 0.88 11
accuracy 0.93 45
macro avg 0.93 0.94 0.93 45
weighted avg 0.95 0.93 0.93 45
CTOR for best model: SGDClassifier(alpha=0.01, eta0=0.1, max_iter=1400, penalty='l1',
random_state=42, validation_fraction=0.2)
best: dat=iris, score=0.96190, model=SGDClassifier(alpha=0.01,early_stopping=False,eta0=0.1,learning_rate='optimal',loss='hinge',max_iter=1400,n_iter_no_change=5,penalty='l1',power_t=0.5,validation_fraction=0.2)
OK(grid-search)
Qd MNIST Search Quest II¶
Description
Finally, a search-quest competition: __who can find the best model+hyperparameters for the MNIST dataset?__ You change to the MNIST data by calling LoadAndSetupData('mnist'), and this is a completely different ball-game than the tiny iris dataset: it's much larger (but still far from big-data)!
You might opt for the exhaustive grid search, or use the faster but less optimal random search...your choice.
You are free to pick any classifier in Scikit-learn, even algorithms we have not discussed yet---except Neural Networks and KNeighborsClassifier!.
Keep the score function at
f1_micro, otherwise, we will be comparing 'æbler og pærer' (apples and pears). And, you may also want to scale your input data for some models to perform better.
REMEMBER, DO NOT USE any Neural Network models. This also means not to use any
Keras or Tensorflow models...since they outperform most other models, and there are also too many examples on the internet to cut-and-paste from!
Check your result by printing the first return value from FullReport()
b1, m1 = FullReport(random_tuned , X_test, y_test, time_randomsearch)
print(b1)
that will display a result like
best: dat=mnist, score=0.90780, model=SGDClassifier(alpha=1.0,eta0=0.0001,learning_rate='invscaling')
and paste your currently best model into the message box, for ITMAL group 09 like
Grp09: best: dat=mnist, score=0.90780, model=SGDClassifier(alpha=1.0,eta0=0.0001,learning_rate='invscaling')
Grp09: CTOR for best model: SGDClassifier(alpha=1.0, average=False, class_weight=None, early_stopping=False,
epsilon=0.1, eta0=0.0001, fit_intercept=True, l1_ratio=0.15,
learning_rate='invscaling', loss='hinge', max_iter=1000,
n_iter_no_change=5, n_jobs=None, penalty='l2', power_t=0.5,
random_state=None, shuffle=True, tol=0.001,
validation_fraction=0.1, verbose=0, warm_start=False)
on Brightspace: "L09: Regularisering, optimering og søgning" | "Qd MNIST Search Quest"
and, check if your score (for MNIST) is better than the currently best score. Republish if you get a better score than your own previously best. Deadline for submission of scores is the same as the deadline for the O3 journal handin.
Remember to provide an ITMAL group name manually, so we can identify a winner: the 1st prize is cake!
For the journal hand-in, report your progress in scoring choosing different models, hyperparameters to search and how you might need to preprocess your data...and note, that the journal will not be accepted unless it contains information about Your results published on the Brightspace 'Search Quest II' page!
In this section we change our LoadAndSetupData call to use the mnist dataset. We will try to tune the model and its hyperparameters to find the best f1_micro score.
best: dat=mnist, score=0.96771, model=RandomForestClassifier(bootstrap=False,criterion='gini',max_depth=None,max_features='log2',min_samples_leaf=1,min_samples_split=5,n_estimators=200)
# Qd Search Quest II: randomized hyperparameter search for a
# RandomForestClassifier on the MNIST dataset, scored with 'f1_micro'.
from sklearn.ensemble import RandomForestClassifier
# Setup data
X_train, X_test, y_train, y_test = LoadAndSetupData(
'mnist') # 'iris', 'moon', or 'mnist'
# Setup search parameters
model = RandomForestClassifier(
random_state=83  # fixed seed so each individual forest fit is reproducible
)
# Randomized-search space. NOTE(review): with 'bootstrap' fixed to [True],
# 'oob_score'=True is a legal combination, but small forests (n_estimators=10)
# emit "Some inputs do not have OOB scores" UserWarnings, and combining
# 'warm_start'=True with the 'balanced*' class_weight presets also triggers
# UserWarnings (both visible in the cell output below).
tuning_parameters = {
'n_estimators': [10, 100, 200],
'max_depth': [None, 10, 20, 30],
'min_samples_split': [2, 5, 10],
'min_samples_leaf': [1, 2, 4],
'class_weight': [None, 'balanced', 'balanced_subsample'],
'max_leaf_nodes': [None, 5, 10, 20],
'min_impurity_decrease': [0.0, 0.1, 0.2],
'oob_score': [True, False],
'max_features': ['sqrt', 'log2'],
'bootstrap': [True],
'criterion': ['gini', 'entropy'],
'warm_start': [True, False],
'ccp_alpha': [0.0, 0.1, 0.2]
}
# Number of cross-validation folds and search verbosity (0 = silent).
CV = 5
VERBOSE = 0
# Run RandomizedSearchCV for the model: 100 random draws from the space above
# (the original comment said GridSearchCV, but this is a randomized search).
grid_tuned = RandomizedSearchCV(model,
tuning_parameters,
n_iter=100,
random_state=66,
cv=CV,
scoring='f1_micro',
verbose=VERBOSE,
n_jobs=-1)
# Time the whole fit so the search duration can be included in the report.
start = time()
grid_tuned.fit(X_train, y_train)
t = time() - start
# Report result: b0 is the one-line best-model summary string; m0 is
# presumably the best fitted model — confirm against FullReport's definition.
b0, m0 = FullReport(grid_tuned, X_test, y_test, t)
print('OK(grid-search)')
DATA: mnist.. org. data: X.shape =(70000; 784), y.shape =(70000) train data: X_train.shape=(49000; 784), y_train.shape=(49000) test data: X_test.shape =(21000; 784), y_test.shape =(21000)
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:578: UserWarning: Some inputs do not have OOB scores. This probably means too few trees were used to compute any reliable OOB estimates.
warn(
/Users/my-thanhthile/Library/Python/3.9/lib/python/site-packages/sklearn/ensemble/_forest.py:780: UserWarning: class_weight presets "balanced" or "balanced_subsample" are not recommended for warm_start if the fitted data differs from the full dataset. In order to use "balanced" weights, use compute_class_weight ("balanced", classes, y). In place of y you can use a large enough sample of the full training set target to properly estimate the class frequency distributions. Pass the resulting weights as the class_weight parameter.
warn(
SEARCH TIME: 471.92 sec
Best model set found on train set:
best parameters={'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
best 'f1_micro' score=0.9654285714285715
best index=49
Best estimator CTOR:
RandomForestClassifier(class_weight='balanced_subsample', criterion='entropy',
max_depth=20, min_samples_split=5, n_estimators=200,
random_state=173, warm_start=True)
Grid scores ('f1_micro') on development set:
[ 0]: 0.497 (+/-0.094) for {'warm_start': True, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 5, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.1, 'bootstrap': True}
[ 1]: 0.568 (+/-0.009) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[ 2]: 0.532 (+/-0.020) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[ 3]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[ 4]: 0.606 (+/-0.012) for {'warm_start': False, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[ 5]: 0.576 (+/-0.011) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': None, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[ 6]: 0.651 (+/-0.006) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 10, 'max_features': 'log2', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[ 7]: 0.505 (+/-0.073) for {'warm_start': False, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[ 8]: 0.958 (+/-0.005) for {'warm_start': False, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 30, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[ 9]: 0.501 (+/-0.083) for {'warm_start': False, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[10]: 0.112 (+/-0.000) for {'warm_start': False, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[11]: 0.532 (+/-0.020) for {'warm_start': False, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[12]: 0.101 (+/-0.011) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 30, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[13]: 0.101 (+/-0.011) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 10, 'max_features': 'log2', 'max_depth': 30, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.1, 'bootstrap': True}
[14]: 0.685 (+/-0.009) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[15]: 0.551 (+/-0.013) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[16]: 0.769 (+/-0.012) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[17]: 0.551 (+/-0.015) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[18]: 0.099 (+/-0.004) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 10, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[19]: 0.681 (+/-0.008) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.1, 'bootstrap': True}
[20]: 0.685 (+/-0.009) for {'warm_start': True, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[21]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[22]: 0.944 (+/-0.005) for {'warm_start': True, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[23]: 0.505 (+/-0.070) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[24]: 0.112 (+/-0.000) for {'warm_start': False, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[25]: 0.100 (+/-0.011) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.1, 'bootstrap': True}
[26]: 0.677 (+/-0.018) for {'warm_start': False, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.1, 'bootstrap': True}
[27]: 0.779 (+/-0.009) for {'warm_start': True, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[28]: 0.101 (+/-0.006) for {'warm_start': True, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.1, 'bootstrap': True}
[29]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 30, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[30]: 0.546 (+/-0.020) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[31]: 0.101 (+/-0.006) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[32]: 0.848 (+/-0.007) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[33]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[34]: 0.101 (+/-0.006) for {'warm_start': False, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[35]: 0.101 (+/-0.011) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.1, 'bootstrap': True}
[36]: 0.735 (+/-0.011) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[37]: 0.787 (+/-0.021) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[38]: 0.715 (+/-0.013) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': None, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.1, 'bootstrap': True}
[39]: 0.582 (+/-0.020) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[40]: 0.470 (+/-0.042) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[41]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 10, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 10, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[42]: 0.582 (+/-0.020) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[43]: 0.859 (+/-0.011) for {'warm_start': True, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[44]: 0.101 (+/-0.011) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[45]: 0.944 (+/-0.006) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[46]: 0.101 (+/-0.006) for {'warm_start': False, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[47]: 0.652 (+/-0.017) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[48]: 0.657 (+/-0.010) for {'warm_start': False, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[49]: 0.965 (+/-0.003) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[50]: 0.101 (+/-0.011) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[51]: 0.099 (+/-0.004) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.1, 'bootstrap': True}
[52]: 0.784 (+/-0.011) for {'warm_start': False, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[53]: 0.505 (+/-0.073) for {'warm_start': False, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[54]: 0.651 (+/-0.002) for {'warm_start': True, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': None, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[55]: 0.604 (+/-0.012) for {'warm_start': False, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.1, 'bootstrap': True}
[56]: 0.602 (+/-0.010) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[57]: 0.101 (+/-0.006) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[58]: 0.690 (+/-0.020) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[59]: 0.112 (+/-0.000) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 10, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 30, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[60]: 0.551 (+/-0.013) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[61]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[62]: 0.778 (+/-0.012) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[63]: 0.651 (+/-0.006) for {'warm_start': False, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[64]: 0.661 (+/-0.010) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[65]: 0.551 (+/-0.013) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[66]: 0.099 (+/-0.004) for {'warm_start': True, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[67]: 0.100 (+/-0.011) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[68]: 0.955 (+/-0.004) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[69]: 0.844 (+/-0.007) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[70]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[71]: 0.505 (+/-0.073) for {'warm_start': True, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[72]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[73]: 0.101 (+/-0.011) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[74]: 0.964 (+/-0.003) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[75]: 0.643 (+/-0.022) for {'warm_start': True, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[76]: 0.112 (+/-0.000) for {'warm_start': False, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[77]: 0.661 (+/-0.010) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': None, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[78]: 0.101 (+/-0.006) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[79]: 0.099 (+/-0.004) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[80]: 0.099 (+/-0.004) for {'warm_start': False, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[81]: 0.582 (+/-0.024) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[82]: 0.532 (+/-0.020) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 10, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[83]: 0.101 (+/-0.011) for {'warm_start': False, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.1, 'bootstrap': True}
[84]: 0.606 (+/-0.012) for {'warm_start': True, 'oob_score': True, 'n_estimators': 200, 'min_samples_split': 10, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[85]: 0.532 (+/-0.020) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 10, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[86]: 0.582 (+/-0.020) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.1, 'bootstrap': True}
[87]: 0.505 (+/-0.070) for {'warm_start': False, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 5, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[88]: 0.642 (+/-0.021) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[89]: 0.582 (+/-0.020) for {'warm_start': True, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[90]: 0.112 (+/-0.000) for {'warm_start': False, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.0, 'bootstrap': True}
[91]: 0.112 (+/-0.000) for {'warm_start': False, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 10, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.2, 'bootstrap': True}
[92]: 0.583 (+/-0.020) for {'warm_start': False, 'oob_score': True, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 10, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[93]: 0.100 (+/-0.011) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.1, 'max_leaf_nodes': 20, 'max_features': 'sqrt', 'max_depth': 30, 'criterion': 'gini', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
[94]: 0.101 (+/-0.011) for {'warm_start': True, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 2, 'min_samples_leaf': 1, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': 10, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'gini', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.2, 'bootstrap': True}
[95]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': True, 'n_estimators': 10, 'min_samples_split': 2, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 10, 'max_features': 'log2', 'max_depth': None, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[96]: 0.501 (+/-0.083) for {'warm_start': True, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 10, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.2, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': None, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.0, 'bootstrap': True}
[97]: 0.112 (+/-0.000) for {'warm_start': True, 'oob_score': False, 'n_estimators': 200, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': 5, 'max_features': 'sqrt', 'max_depth': None, 'criterion': 'gini', 'class_weight': None, 'ccp_alpha': 0.1, 'bootstrap': True}
[98]: 0.957 (+/-0.003) for {'warm_start': False, 'oob_score': False, 'n_estimators': 100, 'min_samples_split': 5, 'min_samples_leaf': 4, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced_subsample', 'ccp_alpha': 0.0, 'bootstrap': True}
[99]: 0.501 (+/-0.083) for {'warm_start': False, 'oob_score': False, 'n_estimators': 10, 'min_samples_split': 5, 'min_samples_leaf': 2, 'min_impurity_decrease': 0.0, 'max_leaf_nodes': None, 'max_features': 'log2', 'max_depth': 20, 'criterion': 'entropy', 'class_weight': 'balanced', 'ccp_alpha': 0.2, 'bootstrap': True}
Detailed classification report:
The model is trained on the full development set.
The scores are computed on the full evaluation set.
precision recall f1-score support
0 0.98 0.99 0.99 2077
1 0.98 0.99 0.99 2385
2 0.96 0.97 0.96 2115
3 0.97 0.94 0.95 2117
4 0.96 0.97 0.97 2004
5 0.96 0.96 0.96 1900
6 0.97 0.98 0.97 2045
7 0.97 0.96 0.97 2189
8 0.95 0.96 0.96 2042
9 0.95 0.95 0.95 2126
accuracy 0.97 21000
macro avg 0.97 0.97 0.97 21000
weighted avg 0.97 0.97 0.97 21000
CTOR for best model: RandomForestClassifier(class_weight='balanced_subsample', criterion='entropy',
max_depth=20, min_samples_split=5, n_estimators=200,
random_state=173, warm_start=True)
best: dat=mnist, score=0.96543, model=RandomForestClassifier(bootstrap=True,ccp_alpha=0.0,class_weight='balanced_subsample',criterion='entropy',max_depth=20,max_features='sqrt',max_leaf_nodes=None,min_impurity_decrease=0.0,min_samples_leaf=1,min_samples_split=5,n_estimators=200,oob_score=False,warm_start=True)
OK(grid-search)
Conclusion Several adjustments were made, such as increasing and decreasing n_iter and tuning parameters: max_depth, min_samples_split and so on. Although we increased/decreased parameters by a significant amount, the accuracy score did not change much. We obtained an average accuracy score of 0.96... with only a range of 0.001 in difference when tuning the parameters. We would therefore suggest that the problem lies with the data: the dataset does not contain enough information to obtain a better accuracy score. Our final and best result is 0.96543
SWMAL Exercise¶
Advanced CNN using Roboflow¶
Read the web page 'How to Train YOLOv8 Object Detection on a Custom Dataset'
https://blog.roboflow.com/how-to-train-yolov8-on-a-custom-dataset/
Then create your own Roboflow project, with some data of your choice found on the site using
and clone an existing project. The Roboflow 'pipeline'[1] includes many steps
here you only want to look into the process and train steps and take a peek into the collect, organize and labelling step, which is normally the most time-consuming part of any ML project.
The current computer vision/ML projects on Roboflow include object detection, classification, instance segmentation and semantic segmentation, and you are free to choose between the different computer vision-related concepts, but object detection and classification would probably be the best choice for this journal (instance and semantic segmentation are highly complicated processes.)
Train and test a model and make documentation of the process for the journal, using images, and learning graphs. etc. from the site.
(Notice, that there is no obvious way of getting hands-on any code behind the 'pipeline' in Roboflow, if, say you want to elaborate on the Yolov models on your own computer.)
[1]^ https://2486075003-files.gitbook.io/~/files/v0/b/gitbook-x-prod.appspot.com/o/spaces%2F-M6S9nPJhEX9FYH6clfW%2Fuploads%2FfHpPTWNdCVR9qHQDeskF%2FScreen%20Shot%202022-08-24%20at%2012.35.36%20PM.png?alt=media&token=623927fe-3099-4ccd-8aaa-890bf5c0b03b
import os

from roboflow import Roboflow

# SECURITY NOTE(review): the API key was hard-coded in the notebook; prefer
# the ROBOFLOW_API_KEY environment variable, falling back to the original
# literal so existing behaviour is unchanged for anyone without it set.
rf = Roboflow(api_key=os.environ.get("ROBOFLOW_API_KEY", "WiAMnmKTBMwcuvnwN3Qm"))

# Open the "rick_astly" project in the "aarhus-uni-j9joo" workspace and
# download version 1 of its dataset in YOLOv8 format (creates a local
# "Rick_Astly-1" folder, as seen in the cell output below).
project = rf.workspace("aarhus-uni-j9joo").project("rick_astly")
dataset = project.version(1).download("yolov8")
loading Roboflow workspace... loading Roboflow project... Dependency ultralytics==8.0.196 is required but found version=8.0.208, to fix: `pip install ultralytics==8.0.196`
Downloading Dataset Version Zip in Rick_Astly-1 to yolov8:: 100%|āāāāāāāāāā| 58472/58472 [00:02<00:00, 22763.28it/s]
Extracting Dataset Version Zip to Rick_Astly-1 in yolov8:: 100%|āāāāāāāāāā| 2938/2938 [00:01<00:00, 1595.61it/s]
We have used Roboflow to train a model to detect pistols. To do so we followed the structure addressed in the picture above.
Firstly we had to acquire our data. To do this we looked at Roboflow Universe and found a labeled dataset that contained pistols, which we downloaded and uploaded to Roboflow.
Luckily for us, the dataset was already labeled, so we did not need to assign parts of the data to each of us for labeling. This would otherwise be standard procedure when using Roboflow with an unlabeled dataset: afterwards you would look at your assigned images and label the items on them (in our case pistols).
We now need to generate our dataset and ensure that our data has the correct formats. Firstly we separate our data into a training set, a validation set and a test set. The validation set is essentially a test set that is reused after each epoch to give us an idea of how effective our model currently is and to enable early stopping if we are over- or underfitting.
Thereafter we standardize our images to ensure they have the same amount of pixels and are oriented correctly
Finally we can augment our dataset to challenge our model further by disturbing something such as the orientation, the saturation or the noise level. This can ensure that our model becomes better at detecting the object even if the data has a tendency to, e.g., have some dead pixels or have different levels of saturation because the images were taken with different cameras.
In our case we just wanted to exemplify how to use Roboflow and therefore we did not add any noise.
Train
REVISIONS|| :-|:- 2023-04-10| CEF, initial.
import os

from roboflow import Roboflow

# SECURITY NOTE(review): avoid committing the literal key; env var takes
# precedence, original literal kept as fallback for compatibility.
rf = Roboflow(api_key=os.environ.get("ROBOFLOW_API_KEY", "WiAMnmKTBMwcuvnwN3Qm"))

# BUG FIX: the original called rf.workspace().project("Rick-Astly-1"),
# passing the *downloaded dataset folder* name instead of the project slug,
# which raised:
#   RuntimeError: Unsupported request. `GET /aarhus-uni-j9joo/Rick-Astly-1`
#   does not exist or cannot be loaded due to missing permissions.
# The identifiers that work are the ones used in the download cell above:
# workspace "aarhus-uni-j9joo", project "rick_astly", version 1.
project = rf.workspace("aarhus-uni-j9joo").project("rick_astly")
model = project.version(1).model

# infer on a local image and print the raw JSON predictions
print(model.predict("image.png", confidence=50, overlap=50).json())
loading Roboflow workspace... loading Roboflow project...
--------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) c:\Users\oscar\MAL\mal12\L10\cnn2.ipynb Cell 5 line 4 <a href='vscode-notebook-cell:/c%3A/Users/oscar/MAL/mal12/L10/cnn2.ipynb#W6sZmlsZQ%3D%3D?line=0'>1</a> from roboflow import Roboflow <a href='vscode-notebook-cell:/c%3A/Users/oscar/MAL/mal12/L10/cnn2.ipynb#W6sZmlsZQ%3D%3D?line=2'>3</a> rf = Roboflow(api_key="WiAMnmKTBMwcuvnwN3Qm") ----> <a href='vscode-notebook-cell:/c%3A/Users/oscar/MAL/mal12/L10/cnn2.ipynb#W6sZmlsZQ%3D%3D?line=3'>4</a> project = rf.workspace().project("Rick-Astly-1") <a href='vscode-notebook-cell:/c%3A/Users/oscar/MAL/mal12/L10/cnn2.ipynb#W6sZmlsZQ%3D%3D?line=4'>5</a> model = project.version(1).model <a href='vscode-notebook-cell:/c%3A/Users/oscar/MAL/mal12/L10/cnn2.ipynb#W6sZmlsZQ%3D%3D?line=6'>7</a> # infer on a local image File c:\Users\oscar\AppData\Local\Programs\Python\Python311\Lib\site-packages\roboflow\core\workspace.py:100, in Workspace.project(self, project_name) 98 # Throw error if dataset isn't valid/user doesn't have permissions to access the dataset 99 if dataset_info.status_code != 200: --> 100 raise RuntimeError(dataset_info.text) 102 dataset_info = dataset_info.json()["project"] 104 return Project(self.__api_key, dataset_info, self.model_format) RuntimeError: { "error": { "message": "Unsupported request. `GET /aarhus-uni-j9joo/Rick-Astly-1` does not exist or cannot be loaded due to missing permissions.", "type": "GraphMethodException", "hint": "You can see your active workspace by issuing a GET request to `/` with your `api_key`." } }